diff --git a/.flake8 b/.flake8
index 18fba2c15..d64c2564a 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,17 @@
[flake8]
max-line-length = 125
-ignore = W503
+ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
+exclude =
+ # Do not traverse examples
+ examples,
+ # Do not include package initializers
+ __init__.py,
+ # No need to traverse our git directory
+ .git,
+ # There's no value in checking cache directories
+ __pycache__,
+ # No need to include the build path
+ build,
+ # This contains builds that we don't want to check
+ dist # This is generated with `python build .` for package releases
+# max-complexity = 10
diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml
index 3e968d179..de0d994c8 100644
--- a/.github/workflows/bench.yml
+++ b/.github/workflows/bench.yml
@@ -52,7 +52,19 @@ jobs:
ftype: q4_0
pr_comment_enabled: "true"
- if: ${{ github.event.inputs.gpu-series == 'Standard_NC4as_T4_v3' || github.event.schedule || github.event.pull_request || github.head_ref == 'master' || github.ref_name == 'master' || github.event.push.ref == 'refs/heads/master' }}
+ if: |
+ inputs.gpu-series == 'Standard_NC4as_T4_v3'
+ || (
+ github.event_name == 'schedule'
+ && github.ref_name == 'master'
+ && github.repository_owner == 'ggerganov'
+ )
+ || github.event_name == 'pull_request_target'
+ || (
+ github.event_name == 'push'
+ && github.event.ref == 'refs/heads/master'
+ && github.repository_owner == 'ggerganov'
+ )
steps:
- name: Clone
id: checkout
diff --git a/.github/workflows/python-lint.yml b/.github/workflows/python-lint.yml
index 5be17f157..a8d46f31d 100644
--- a/.github/workflows/python-lint.yml
+++ b/.github/workflows/python-lint.yml
@@ -20,5 +20,4 @@ jobs:
- name: flake8 Lint
uses: py-actions/flake8@v2
with:
- ignore: "E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503"
- exclude: "examples/*,examples/*/**,*/**/__init__.py,convert-hf-to-gguf-update.py"
+ plugins: "flake8-no-print"
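For context: `flake8-no-print` flags every bare `print()` as error `NP100`, which is why the Python scripts in this patch migrate to the `logging` module. A minimal sketch of the resulting pattern (module name illustrative; the `# noqa: NP100` escape hatch used later in this patch is the plugin's own code for an intentional print):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("convert-hf-to-gguf-update")

# status and diagnostics go through the logger instead of print()
logger.info("File downloaded successfully")

# output meant to be copy-pasted still uses print(), exempted from the lint
print("def get_vocab_base_pre(self, tokenizer) -> str: ...")  # noqa: NP100
```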
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 65796fe2e..91d791628 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,13 +3,14 @@
exclude: prompts/.*.txt
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v3.2.0
+ rev: v4.6.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/PyCQA/flake8
- rev: 6.0.0
+ rev: 7.0.0
hooks:
- id: flake8
+ additional_dependencies: [flake8-no-print]
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 477c5b57c..aa65b0d6c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -103,6 +103,8 @@ set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for
set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
"llama: max. batch size for using peer access")
option(LLAMA_CUDA_NO_PEER_COPY "llama: do not use peer to peer copies" OFF)
+option(LLAMA_CUDA_NO_VMM "llama: do not try to use CUDA VMM" OFF)
+
option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF)
@@ -403,12 +405,16 @@ if (LLAMA_CUDA)
list(APPEND GGML_SOURCES_CUDA "ggml-cuda.cu")
add_compile_definitions(GGML_USE_CUDA)
+ add_compile_definitions(GGML_CUDA_USE_GRAPHS)
if (LLAMA_CUDA_FORCE_DMMV)
add_compile_definitions(GGML_CUDA_FORCE_DMMV)
endif()
if (LLAMA_CUDA_FORCE_MMQ)
add_compile_definitions(GGML_CUDA_FORCE_MMQ)
endif()
+ if (LLAMA_CUDA_NO_VMM)
+ add_compile_definitions(GGML_CUDA_NO_VMM)
+ endif()
add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
if (DEFINED LLAMA_CUDA_DMMV_Y)
@@ -425,7 +431,7 @@ if (LLAMA_CUDA)
if (LLAMA_STATIC)
if (WIN32)
- # As of 12.3.1 CUDA Tookit for Windows does not offer a static cublas library
+ # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt)
else ()
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
@@ -434,7 +440,11 @@ if (LLAMA_CUDA)
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
endif()
- set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver)
+ if (LLAMA_CUDA_NO_VMM)
+ # No VMM requested, no need to link directly with the cuda driver lib (libcuda.so)
+ else()
+ set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver) # required by cuDeviceGetAttribute(), cuMemGetAllocationGranularity(...), ...
+ endif()
if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
# 52 == lowest CUDA 12 standard
diff --git a/Makefile b/Makefile
index 0a73f2a58..3fa56d13a 100644
--- a/Makefile
+++ b/Makefile
@@ -77,11 +77,10 @@ test: $(TEST_TARGETS)
./$$test_target $(CURDIR)/models/ggml-vocab-llama-bpe.gguf; \
./$$test_target $(CURDIR)/models/ggml-vocab-phi-3.gguf; \
./$$test_target $(CURDIR)/models/ggml-vocab-falcon.gguf; \
- ./$$test_target $(CURDIR)/models/ggml-vocab-deepseek-coder.gguf; \
- ./$$test_target $(CURDIR)/models/ggml-vocab-deepseek-llm.gguf; \
./$$test_target $(CURDIR)/models/ggml-vocab-bert-bge.gguf; \
./$$test_target $(CURDIR)/models/ggml-vocab-starcoder.gguf; \
./$$test_target $(CURDIR)/models/ggml-vocab-gpt-2.gguf; \
+ ./$$test_target $(CURDIR)/models/ggml-vocab-refact.gguf; \
elif [ "$$test_target" = "tests/test-tokenizer-1-spm" ]; then \
continue; \
elif [ "$$test_target" = "tests/test-tokenizer-1-bpe" ]; then \
@@ -434,7 +433,7 @@ ifdef LLAMA_CUDA
else
CUDA_PATH ?= /usr/local/cuda
endif
- MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
+ MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include -DGGML_CUDA_USE_GRAPHS
MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
OBJS += ggml-cuda.o
OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
diff --git a/README.md b/README.md
index 514ef3af1..ff00f9966 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@

-[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml)
[Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml)
@@ -20,7 +20,8 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
### Hot topics
-- **BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920**
+- **Initial Flash-Attention support: https://github.com/ggerganov/llama.cpp/pull/5021**
+- BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920
- MoE memory layout has been updated - reconvert models for `mmap` support and regenerate `imatrix` https://github.com/ggerganov/llama.cpp/pull/6387
- Model sharding instructions using `gguf-split` https://github.com/ggerganov/llama.cpp/discussions/6404
- Fix major bug in Metal batched inference https://github.com/ggerganov/llama.cpp/pull/6225
@@ -139,7 +140,6 @@ Typically finetunes of the base models below are supported as well.
- [x] [MobileVLM 1.7B/3B models](https://huggingface.co/models?search=mobileVLM)
- [x] [Yi-VL](https://huggingface.co/models?search=Yi-VL)
- [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM)
-- [x] [Moondream](https://huggingface.co/vikhyatk/moondream2)
**HTTP server**
@@ -175,6 +175,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [nat/openplayground](https://github.com/nat/openplayground)
- [Faraday](https://faraday.dev/) (proprietary)
- [LMStudio](https://lmstudio.ai/) (proprietary)
+- [Layla](https://play.google.com/store/apps/details?id=com.laylalite) (proprietary)
- [LocalAI](https://github.com/mudler/LocalAI) (MIT)
- [LostRuins/koboldcpp](https://github.com/LostRuins/koboldcpp) (AGPL)
- [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile)
@@ -712,6 +713,8 @@ Building the program with BLAS support may lead to some performance improvements
To obtain the official LLaMA 2 weights please see the Obtaining and using the Facebook LLaMA 2 model section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face.
+Note: `convert.py` does not support LLaMA 3; use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face instead.
+
```bash
# obtain the official LLaMA model weights and place them in ./models
ls ./models
@@ -933,17 +936,25 @@ If your issue is with model generation quality, then please at least scan the fo
### Android
+#### Build on Android using Termux
+[Termux](https://github.com/termux/termux-app#installation) provides a way to run `llama.cpp` on an Android device (no root required). First, install the required packages:
+```
+apt update && apt upgrade -y
+apt install git make cmake
+```
+
+It's recommended to move your model inside the `~/` directory for best performance:
+```
+cd storage/downloads
+mv model.gguf ~/
+```
+
+[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`.
+
#### Building the Project using Android NDK
-You can easily run `llama.cpp` on Android device with [termux](https://termux.dev/).
-
-First, install the essential packages for termux:
-```
-pkg install clang wget git cmake
-```
-Second, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake:
-
-You can execute the following commands on your computer to avoid downloading the NDK to your mobile. Of course, you can also do this in Termux.
+Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake.
+Execute the following commands on your computer to avoid downloading the NDK to your mobile device. Alternatively, you can run them in Termux:
```
$ mkdir build-android
$ cd build-android
@@ -951,7 +962,9 @@ $ export NDK=<your_ndk_directory>
$ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
$ make
```
-Install [termux](https://termux.dev/) on your device and run `termux-setup-storage` to get access to your SD card.
+
+Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (on Android 11+, run the command twice).
+
Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:
(Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)
@@ -973,53 +986,10 @@ $cd /data/data/com.termux/files/home/bin
$./main -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml
```
-Here is a demo of an interactive session running on Pixel 5 phone:
+Here's a demo of an interactive session running on a Pixel 5 phone:
https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4
-#### Building the Project using Termux (F-Droid)
-Termux from F-Droid offers an alternative route to execute the project on an Android device. This method empowers you to construct the project right from within the terminal, negating the requirement for a rooted device or SD Card.
-
-Outlined below are the directives for installing the project using OpenBLAS and CLBlast. This combination is specifically designed to deliver peak performance on recent devices that feature a GPU.
-
-If you opt to utilize OpenBLAS, you'll need to install the corresponding package.
-```
-apt install libopenblas
-```
-
-Subsequently, if you decide to incorporate CLBlast, you'll first need to install the requisite OpenCL packages:
-```
-apt install ocl-icd opencl-headers opencl-clhpp clinfo
-```
-
-In order to compile CLBlast, you'll need to first clone the respective Git repository, which can be found at this URL: https://github.com/CNugteren/CLBlast. Alongside this, clone this repository into your home directory. Once this is done, navigate to the CLBlast folder and execute the commands detailed below:
-```
-cmake .
-make
-cp libclblast.so* $PREFIX/lib
-cp ./include/clblast.h ../llama.cpp
-```
-
-Following the previous steps, navigate to the LlamaCpp directory. To compile it with OpenBLAS and CLBlast, execute the command provided below:
-```
-cp /data/data/com.termux/files/usr/include/openblas/cblas.h .
-cp /data/data/com.termux/files/usr/include/openblas/openblas_config.h .
-make LLAMA_CLBLAST=1 //(sometimes you need to run this command twice)
-```
-
-Upon completion of the aforementioned steps, you will have successfully compiled the project. To run it using CLBlast, a slight adjustment is required: a command must be issued to direct the operations towards your device's physical GPU, rather than the virtual one. The necessary command is detailed below:
-```
-GGML_OPENCL_PLATFORM=0
-GGML_OPENCL_DEVICE=0
-export LD_LIBRARY_PATH=/vendor/lib64:$LD_LIBRARY_PATH
-```
-
-(Note: some Android devices, like the Zenfone 8, need the following command instead - "export LD_LIBRARY_PATH=/system/vendor/lib64:$LD_LIBRARY_PATH". Source: https://www.reddit.com/r/termux/comments/kc3ynp/opencl_working_in_termux_more_in_comments/ )
-
-For easy and swift re-execution, consider documenting this final part in a .sh script file. This will enable you to rerun the process with minimal hassle.
-
-Place your desired model into the `~/llama.cpp/models/` directory and execute the `./main (...)` script.
-
### Docker
#### Prerequisites
diff --git a/ci/run.sh b/ci/run.sh
index bf21b6b31..e67c1a5ff 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -160,9 +160,8 @@ function gg_run_test_scripts_debug {
set -e
- # TODO: too slow, run on dedicated node
- #(cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
- #(cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+ (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+ (cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
set +e
}
@@ -695,8 +694,10 @@ test $ret -eq 0 && gg_run ctest_release
if [ -z ${GG_BUILD_LOW_PERF} ]; then
test $ret -eq 0 && gg_run embd_bge_small
- test $ret -eq 0 && gg_run test_scripts_debug
- test $ret -eq 0 && gg_run test_scripts_release
+ if [ -z ${GG_BUILD_CLOUD} ] || [ ${GG_BUILD_EXTRA_TESTS_0} ]; then
+ test $ret -eq 0 && gg_run test_scripts_debug
+ test $ret -eq 0 && gg_run test_scripts_release
+ fi
if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then
if [ -z ${GG_BUILD_CUDA} ]; then
diff --git a/common/common.cpp b/common/common.cpp
index 243b88abf..0535508ba 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1,4 +1,6 @@
#include "common.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
#include "json-schema-to-grammar.h"
#include "llama.h"
@@ -76,7 +78,7 @@ int32_t get_num_physical_cores() {
// enumerate the set of thread siblings, num entries is num cores
std::unordered_set<std::string> siblings;
for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
- std::ifstream thread_siblings("/sys/devices/system/cpu"
+ std::ifstream thread_siblings("/sys/devices/system/cpu/cpu"
+ std::to_string(cpu) + "/topology/thread_siblings");
if (!thread_siblings.is_open()) {
break; // no more cpus
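The fix above adds the missing `/cpu` path component: the probe is meant to read `/sys/devices/system/cpu/cpu<N>/topology/thread_siblings`, where each distinct `thread_siblings` value corresponds to one physical core; with the old prefix it built `/sys/devices/system/cpu0/...` and failed on the first CPU. A Linux-only sanity check of the same logic, sketched in Python:

```python
from pathlib import Path

# hyperthreads of one physical core report identical sibling masks,
# so the number of distinct masks equals the number of physical cores
siblings = set()
for f in Path("/sys/devices/system/cpu").glob("cpu[0-9]*/topology/thread_siblings"):
    siblings.add(f.read_text().strip())

print(f"physical cores: {len(siblings)}")
```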
@@ -911,6 +913,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.instruct = true;
return true;
}
+ if (arg == "-cnv" || arg == "--conversation") {
+ params.conversation = true;
+ return true;
+ }
if (arg == "-cml" || arg == "--chatml") {
params.chatml = true;
return true;
@@ -1417,6 +1423,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" --version show version and build info\n");
printf(" -i, --interactive run in interactive mode\n");
printf(" --interactive-first run in interactive mode and wait for input right away\n");
+ printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n");
printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n");
printf(" -cml, --chatml run in chatml mode (use with ChatML-compatible models)\n");
printf(" --multiline-input allows you to write or paste multiple lines without ending each in '\\'\n");
@@ -1964,18 +1971,18 @@ static bool llama_download_file(const std::string & url, const std::string & pat
try {
metadata_in >> metadata;
fprintf(stderr, "%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(), metadata.dump().c_str());
- if (metadata.contains("url") && metadata["url"].is_string()) {
- auto previous_url = metadata["url"].get<std::string>();
+ if (metadata.contains("url") && metadata.at("url").is_string()) {
+ auto previous_url = metadata.at("url").get<std::string>();
if (previous_url != url) {
fprintf(stderr, "%s: Model URL mismatch: %s != %s\n", __func__, url.c_str(), previous_url.c_str());
return false;
}
}
- if (metadata.contains("etag") && metadata["etag"].is_string()) {
- etag = metadata["etag"];
+ if (metadata.contains("etag") && metadata.at("etag").is_string()) {
+ etag = metadata.at("etag");
}
- if (metadata.contains("lastModified") && metadata["lastModified"].is_string()) {
- last_modified = metadata["lastModified"];
+ if (metadata.contains("lastModified") && metadata.at("lastModified").is_string()) {
+ last_modified = metadata.at("lastModified");
}
} catch (const nlohmann::json::exception & e) {
fprintf(stderr, "%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what());
diff --git a/common/common.h b/common/common.h
index 9252a4b63..6f00a2cca 100644
--- a/common/common.h
+++ b/common/common.h
@@ -140,6 +140,7 @@ struct gpt_params {
bool random_prompt = false; // do not randomize prompt if none provided
bool use_color = false; // use color to distinguish generations and inputs
bool interactive = false; // interactive mode
+ bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
bool chatml = false; // chatml mode (used for models trained on chatml syntax)
bool prompt_cache_all = false; // save user input and generations to prompt cache
bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
diff --git a/common/json-schema-to-grammar.h b/common/json-schema-to-grammar.h
index e1abed303..41623b346 100644
--- a/common/json-schema-to-grammar.h
+++ b/common/json-schema-to-grammar.h
@@ -1,4 +1,8 @@
#pragma once
+
+#include "ggml.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
std::string json_schema_to_grammar(const nlohmann::ordered_json& schema);
diff --git a/common/log.h b/common/log.h
index 2b2f0e455..6934c57b2 100644
--- a/common/log.h
+++ b/common/log.h
@@ -234,7 +234,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
// INTERNAL, DO NOT USE
// USE LOG() INSTEAD
//
-#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
+#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER) || defined(__clang__)
#define LOG_IMPL(str, ...) \
do { \
if (LOG_TARGET != nullptr) \
@@ -257,7 +257,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
// INTERNAL, DO NOT USE
// USE LOG_TEE() INSTEAD
//
-#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
+#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER) || defined(__clang__)
#define LOG_TEE_IMPL(str, ...) \
do { \
if (LOG_TARGET != nullptr) \
diff --git a/common/sampling.cpp b/common/sampling.cpp
index cc83600d9..3715a7985 100644
--- a/common/sampling.cpp
+++ b/common/sampling.cpp
@@ -35,6 +35,8 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_
result->prev.resize(params.n_prev);
+ result->n_considered = 0;
+
llama_sampling_set_rng_seed(result, params.seed);
return result;
@@ -64,6 +66,7 @@ void llama_sampling_reset(llama_sampling_context * ctx) {
std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
ctx->cur.clear();
+ ctx->n_considered = 0;
}
void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {
@@ -253,6 +256,8 @@ static llama_token llama_sampling_sample_impl(
}
}
+ ctx_sampling->n_considered = cur_p.size;
+
return id;
}
diff --git a/common/sampling.h b/common/sampling.h
index cf7081e36..5b73ecdcd 100644
--- a/common/sampling.h
+++ b/common/sampling.h
@@ -81,6 +81,7 @@ struct llama_sampling_context {
// TODO: replace with ring-buffer
std::vector<llama_token> prev;
std::vector<llama_token_data> cur;
+ size_t n_considered;
std::mt19937 rng;
};
diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
old mode 100644
new mode 100755
index b019c1e3d..e64687722
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
# This script downloads the tokenizer models of the specified models from Huggingface and
# generates the get_vocab_base_pre() function for convert-hf-to-gguf.py
#
@@ -21,6 +23,7 @@
# TODO: automate the update of convert-hf-to-gguf.py
#
+import logging
import os
import requests
import sys
@@ -28,49 +31,66 @@ import json
from hashlib import sha256
from enum import IntEnum, auto
+from transformers import AutoTokenizer
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger("convert-hf-to-gguf-update")
+
class TOKENIZER_TYPE(IntEnum):
SPM = auto()
BPE = auto()
WPM = auto()
+
# TODO: this string has to exercise as much pre-tokenizer functionality as possible
# will be updated with time - contributions welcome
chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
if len(sys.argv) == 2:
token = sys.argv[1]
+ if not token.startswith("hf_"):
+ logger.info("Huggingface token seems invalid")
+ logger.info("Usage: python convert-hf-to-gguf-update.py ")
+ sys.exit(1)
else:
- print("Usage: python convert-hf-to-gguf-update.py ")
+ logger.info("Usage: python convert-hf-to-gguf-update.py ")
sys.exit(1)
# TODO: add models here, base models preferred
models = [
- { "name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
- { "name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", },
- { "name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", },
- { "name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", },
- { "name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
- { "name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
- { "name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
- { "name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
- { "name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
- { "name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
- ]
+ {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
+ {"name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", },
+ {"name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", },
+ {"name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", },
+ {"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
+ {"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
+ {"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
+ {"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
+ {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
+ {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
+ {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
+ {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
+ {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },
+ {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
+ {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
+]
# make directory "models/tokenizers" if it doesn't exist
if not os.path.exists("models/tokenizers"):
os.makedirs("models/tokenizers")
+
def download_file_with_auth(url, token, save_path):
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(url, headers=headers)
if response.status_code == 200:
with open(save_path, 'wb') as f:
f.write(response.content)
- print(f"File {save_path} downloaded successfully")
+ logger.info(f"File {save_path} downloaded successfully")
else:
- print(f"Failed to download file. Status code: {response.status_code}")
+ logger.info(f"Failed to download file. Status code: {response.status_code}")
+
# download the tokenizer models
for model in models:
@@ -81,10 +101,10 @@ for model in models:
if not os.path.exists(f"models/tokenizers/{name}"):
os.makedirs(f"models/tokenizers/{name}")
else:
- print(f"Directory models/tokenizers/{name} already exists - skipping")
+ logger.info(f"Directory models/tokenizers/{name} already exists - skipping")
continue
- print(f"Downloading {name} to models/tokenizers/{name}")
+ logger.info(f"Downloading {name} to models/tokenizers/{name}")
url = f"{repo}/raw/main/config.json"
save_path = f"models/tokenizers/{name}/config.json"
@@ -94,6 +114,14 @@ for model in models:
save_path = f"models/tokenizers/{name}/tokenizer.json"
download_file_with_auth(url, token, save_path)
+ # if the downloaded file is less than 1KB, it is likely a Git LFS pointer; download the real file instead
+ if os.path.getsize(save_path) < 1024:
+ # remove the file
+ os.remove(save_path)
+ url = f"{repo}/resolve/main/tokenizer.json"
+ save_path = f"models/tokenizers/{name}/tokenizer.json"
+ download_file_with_auth(url, token, save_path)
+
if tokt == TOKENIZER_TYPE.SPM:
url = f"{repo}/resolve/main/tokenizer.model"
save_path = f"models/tokenizers/{name}/tokenizer.model"
@@ -115,80 +143,84 @@ for model in models:
continue
# create the tokenizer
- from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
chktok = tokenizer.encode(chktxt)
chkhsh = sha256(str(chktok).encode()).hexdigest()
- print(f"model: {name}")
- print(f"tokt: {tokt}")
- print(f"repo: {model['repo']}")
- print(f"chktok: {chktok}")
- print(f"chkhsh: {chkhsh}")
+ logger.info(f"model: {name}")
+ logger.info(f"tokt: {tokt}")
+ logger.info(f"repo: {model['repo']}")
+ logger.info(f"chktok: {chktok}")
+ logger.info(f"chkhsh: {chkhsh}")
# print the "pre_tokenizer" content from the tokenizer.json
with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f:
cfg = json.load(f)
+ normalizer = cfg["normalizer"]
+ logger.info("normalizer: " + json.dumps(normalizer, indent=4))
pre_tokenizer = cfg["pre_tokenizer"]
- print("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
+ logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
- print(f"\n")
+ logger.info("")
src_ifs += f" if chkhsh == \"{chkhsh}\":\n"
src_ifs += f" # ref: {model['repo']}\n"
src_ifs += f" res = \"{name}\"\n"
-src_func = ""
-src_func += " def get_vocab_base_pre(self, tokenizer) -> str:\n"
-src_func += " # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that\n"
-src_func += " # is specific for the BPE pre-tokenizer used by the model\n"
-src_func += " # we will use this unique identifier to write a \"tokenizer.ggml.pre\" entry in the GGUF file which we can\n"
-src_func += " # use in llama.cpp to implement the same pre-tokenizer\n"
-src_func += "\n"
-src_func += f" chktxt = {repr(chktxt)}\n"
-src_func += "\n"
-src_func += " chktok = tokenizer.encode(chktxt)\n"
-src_func += " chkhsh = sha256(str(chktok).encode()).hexdigest()\n"
-src_func += "\n"
-src_func += " print(f\"chktok: {chktok}\")\n"
-src_func += " print(f\"chkhsh: {chkhsh}\")\n"
-src_func += "\n"
-src_func += " res = None\n"
-src_func += "\n"
-src_func += " # NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script\n"
-src_func += " # or pull the latest version of the model from Huggingface\n"
-src_func += " # don't edit the hashes manually!\n"
-src_func += f"{src_ifs}\n"
-src_func += " if res is None:\n"
-src_func += " print(\"\\n\")\n"
-src_func += " print(\"**************************************************************************************\")\n"
-src_func += " print(\"** WARNING: The BPE pre-tokenizer was not recognized!\")\n"
-src_func += " print(\"** There are 2 possible reasons for this:\")\n"
-src_func += " print(\"** - the model has not been added to convert-hf-to-gguf-update.py yet\")\n"
-src_func += " print(\"** - the pre-tokenization config has changed upstream\")\n"
-src_func += " print(\"** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.\")\n"
-src_func += " print(\"** ref: https://github.com/ggerganov/llama.cpp/pull/6920\")\n"
-src_func += " print(\"**\")\n"
-src_func += " print(f\"** chkhsh: {chkhsh}\")\n"
-src_func += " print(\"**************************************************************************************\")\n"
-src_func += " print(\"\\n\")\n"
-src_func += " raise NotImplementedError(\"BPE pre-tokenizer was not recognized - update get_vocab_base_pre()\")\n"
-src_func += "\n"
-src_func += " print(f\"tokenizer.ggml.pre: {res}\")\n"
-src_func += " print(f\"chkhsh: {chkhsh}\")\n"
-src_func += "\n"
-src_func += " return res\n"
+src_func = f"""
+ def get_vocab_base_pre(self, tokenizer) -> str:
+ # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
+ # is specific for the BPE pre-tokenizer used by the model
+ # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
+ # use in llama.cpp to implement the same pre-tokenizer
-print(src_func)
+ chktxt = {repr(chktxt)}
-print("\n")
-print("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!")
-print("\n")
+ chktok = tokenizer.encode(chktxt)
+ chkhsh = sha256(str(chktok).encode()).hexdigest()
+
+ logger.debug(f"chktok: {{chktok}}")
+ logger.debug(f"chkhsh: {{chkhsh}}")
+
+ res = None
+
+ # NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script
+ # or pull the latest version of the model from Huggingface
+ # don't edit the hashes manually!
+{src_ifs}
+ if res is None:
+ logger.warning("\\n")
+ logger.warning("**************************************************************************************")
+ logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
+ logger.warning("** There are 2 possible reasons for this:")
+ logger.warning("** - the model has not been added to convert-hf-to-gguf-update.py yet")
+ logger.warning("** - the pre-tokenization config has changed upstream")
+ logger.warning("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
+ logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
+ logger.warning("**")
+ logger.warning(f"** chkhsh: {{chkhsh}}")
+ logger.warning("**************************************************************************************")
+ logger.warning("\\n")
+ raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")
+
+ logger.debug(f"tokenizer.ggml.pre: {{repr(res)}}")
+ logger.debug(f"chkhsh: {{chkhsh}}")
+
+ return res
+"""
+
+print(src_func) # noqa: NP100
+
+logger.info("\n")
+logger.info("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!")
+logger.info("\n")
# generate tests for each tokenizer model
tests = [
+ "ied 4 ½ months",
+ "Führer",
"",
" ",
" ",
@@ -229,6 +261,7 @@ tests = [
"3333333",
"33333333",
"333333333",
+ # "Cửa Việt", # llama-bpe fails on this
chktxt,
]
@@ -250,7 +283,6 @@ for model in models:
tokt = model["tokt"]
# create the tokenizer
- from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
@@ -265,15 +297,15 @@ for model in models:
f.write(f" {r}")
f.write("\n")
- print(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*")
+ logger.info(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*")
# generate commands for creating vocab files
-print("\nRun the following commands to generate the vocab files for testing:\n")
+logger.info("\nRun the following commands to generate the vocab files for testing:\n")
for model in models:
name = model["name"]
- print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only")
+ print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100
-print("\n")
+logger.info("\n")
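The script above boils down to one fingerprinting step: encode a fixed check string and hash the resulting token ids, so any upstream change to the pre-tokenizer changes the hash. A condensed sketch of that step (the helper name is illustrative):

```python
from hashlib import sha256

from transformers import AutoTokenizer


def pre_tokenizer_fingerprint(tokenizer_dir: str, chktxt: str) -> str:
    # illustrative helper: the hash covers the token ids, not the input text,
    # so it captures the pre-tokenizer's behavior on the check string
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)
    chktok = tokenizer.encode(chktxt)
    return sha256(str(chktok).encode()).hexdigest()
```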
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index c6b2bfe3e..7ea9cbc44 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -2,17 +2,17 @@
from __future__ import annotations
+import logging
import argparse
import contextlib
import json
import os
import re
import sys
-from abc import ABC, abstractmethod
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
-from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterator, Sequence, TypeVar, cast
+from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast, overload
import numpy as np
import torch
@@ -24,7 +24,9 @@ if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf
-from convert import LlamaHfVocab, permute
+from convert import LlamaHfVocab
+
+logger = logging.getLogger("hf-to-gguf")
###### MODEL DEFINITIONS ######
@@ -41,29 +43,55 @@ class SentencePieceTokenTypes(IntEnum):
AnyModel = TypeVar("AnyModel", bound="type[Model]")
-class Model(ABC):
+class Model:
_model_classes: dict[str, type[Model]] = {}
- def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool):
+ dir_model: Path
+ ftype: int
+ fname_out: Path
+ is_big_endian: bool
+ endianess: gguf.GGUFEndian
+ use_temp_file: bool
+ lazy: bool
+ part_names: list[str]
+ is_safetensors: bool
+ hparams: dict[str, Any]
+ gguf_writer: gguf.GGUFWriter
+ block_count: int
+ tensor_map: gguf.TensorNameMap
+ tensor_names: set[str] | None
+
+ # subclasses should define this!
+ model_arch: gguf.MODEL_ARCH
+
+ def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool):
+ if self.__class__ == Model:
+ raise TypeError(f"{self.__class__.__name__!r} should not be directly instantiated")
self.dir_model = dir_model
self.ftype = ftype
self.fname_out = fname_out
self.is_big_endian = is_big_endian
self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
self.use_temp_file = use_temp_file
- self.is_safetensors = self._is_model_safetensors()
- self.num_parts = Model.count_model_parts(self.dir_model, ".safetensors" if self.is_safetensors else ".bin")
- self.part_names = self._get_part_names()
+ self.lazy = not eager
+ self.part_names = Model.get_model_part_names(self.dir_model, ".safetensors")
+ self.is_safetensors = len(self.part_names) > 0
+ if not self.is_safetensors:
+ self.part_names = Model.get_model_part_names(self.dir_model, ".bin")
self.hparams = Model.load_hparams(self.dir_model)
self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file)
self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"])
+ self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
+ self.tensor_names = None
- @property
- @abstractmethod
- def model_arch(self) -> gguf.MODEL_ARCH:
- pass
+ @classmethod
+ def __init_subclass__(cls):
+ # can't use an abstract property, because overriding it without type errors
+ # would require using decorated functions instead of simply defining the property
+ if "model_arch" not in cls.__dict__:
+ raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")
- def find_hparam(self, keys: Sequence[str], optional: bool = False) -> Any:
+ def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
key = next((k for k in keys if k in self.hparams), None)
if key is not None:
return self.hparams[key]
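The `__init_subclass__` hook above replaces the old abstract `model_arch` property: the check runs at class-definition time, and `cls.__dict__` only contains attributes defined directly on the subclass, so merely inheriting the annotation does not satisfy it. A standalone sketch of the pattern (class names illustrative):

```python
class Model:
    # subclasses must define this directly
    model_arch: str

    # __init_subclass__ is implicitly a classmethod; it runs once per subclass
    def __init_subclass__(cls):
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")


class LlamaModel(Model):
    model_arch = "llama"  # ok


# class BrokenModel(Model):  # raises TypeError as soon as the class is defined
#     pass
```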
@@ -75,8 +103,24 @@ class Model(ABC):
self._set_vocab_gpt2()
def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
+ tensor_names_from_parts: set[str] = set()
+
+ if len(self.part_names) > 1:
+ self.tensor_names = set()
+ index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
+ index_name += ".index.json"
+ logger.info(f"gguf: loading model weight map from '{index_name}'")
+ with open(self.dir_model / index_name, "r", encoding="utf-8") as f:
+ index: dict[str, Any] = json.load(f)
+ weight_map = index.get("weight_map")
+ if weight_map is None or not isinstance(weight_map, dict):
+ raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
+ self.tensor_names.update(weight_map.keys())
+ else:
+ self.tensor_names = tensor_names_from_parts
+
for part_name in self.part_names:
- print(f"gguf: loading model part '{part_name}'")
+ logger.info(f"gguf: loading model part '{part_name}'")
ctx: ContextManager[Any]
if self.is_safetensors:
from safetensors import safe_open
@@ -85,59 +129,97 @@ class Model(ABC):
ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))
with ctx as model_part:
+ tensor_names_from_parts.update(model_part.keys())
+
for name in model_part.keys():
data = model_part.get_tensor(name) if self.is_safetensors else model_part[name]
+ if self.lazy:
+ data = LazyTorchTensor.from_eager(data)
yield name, data
+ # only verify tensor name presence; it doesn't matter if they are not in the right files
+ if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
+ raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}")
+
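The verification above relies on `set.symmetric_difference`, which is empty only when the tensor names read from the parts and the names promised by the index file match exactly in both directions. A tiny illustration with made-up names:

```python
expected = {"tok_embd.weight", "output.weight"}
found = {"tok_embd.weight", "blk.0.attn_q.weight"}

# names missing from the parts and unexpected extras both show up
sym_diff = found.symmetric_difference(expected)
print(sym_diff)  # {'output.weight', 'blk.0.attn_q.weight'} (order may vary)
```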
+ def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
+ name: str = gguf.TENSOR_NAMES[key]
+ if key not in gguf.MODEL_TENSORS[self.model_arch]:
+ raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
+ if "{bid}" in name:
+ assert bid is not None
+ name = name.format(bid=bid)
+ return name + suffix
+
+ def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
+ new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
+ if new_name is None:
+ raise ValueError(f"Can not map tensor {name!r}")
+ return new_name
+
def set_gguf_parameters(self):
self.gguf_writer.add_name(self.dir_model.name)
self.gguf_writer.add_block_count(self.block_count)
if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
self.gguf_writer.add_context_length(n_ctx)
- print(f"gguf: context length = {n_ctx}")
+ logger.info(f"gguf: context length = {n_ctx}")
n_embd = self.find_hparam(["hidden_size", "n_embd"])
self.gguf_writer.add_embedding_length(n_embd)
- print(f"gguf: embedding length = {n_embd}")
+ logger.info(f"gguf: embedding length = {n_embd}")
if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
self.gguf_writer.add_feed_forward_length(n_ff)
- print(f"gguf: feed forward length = {n_ff}")
+ logger.info(f"gguf: feed forward length = {n_ff}")
n_head = self.find_hparam(["num_attention_heads", "n_head"])
self.gguf_writer.add_head_count(n_head)
- print(f"gguf: head count = {n_head}")
+ logger.info(f"gguf: head count = {n_head}")
if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
self.gguf_writer.add_head_count_kv(n_head_kv)
- print(f"gguf: key-value head count = {n_head_kv}")
+ logger.info(f"gguf: key-value head count = {n_head_kv}")
if (rope_theta := self.hparams.get("rope_theta")) is not None:
self.gguf_writer.add_rope_freq_base(rope_theta)
- print(f"gguf: rope theta = {rope_theta}")
+ logger.info(f"gguf: rope theta = {rope_theta}")
if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
- print(f"gguf: rms norm epsilon = {f_rms_eps}")
+ logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")
if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
self.gguf_writer.add_layer_norm_eps(f_norm_eps)
- print(f"gguf: layer norm epsilon = {f_norm_eps}")
+ logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")
if (n_experts := self.hparams.get("num_local_experts")) is not None:
self.gguf_writer.add_expert_count(n_experts)
- print(f"gguf: expert count = {n_experts}")
+ logger.info(f"gguf: expert count = {n_experts}")
if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
self.gguf_writer.add_expert_used_count(n_experts_used)
- print(f"gguf: experts used count = {n_experts_used}")
+ logger.info(f"gguf: experts used count = {n_experts_used}")
self.gguf_writer.add_file_type(self.ftype)
- print(f"gguf: file type = {self.ftype}")
+ logger.info(f"gguf: file type = {self.ftype}")
+
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
+ return [(self.map_tensor_name(name), data_torch)]
+
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid, n_dims # unused
+
+ return False
+
+ def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid, n_dims # unused
+
+ return False
def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")
+
for name, data_torch in self.get_tensors():
# we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+ if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
continue
old_dtype = data_torch.dtype
@@ -146,38 +228,52 @@ class Model(ABC):
if data_torch.dtype not in (torch.float16, torch.float32):
data_torch = data_torch.to(torch.float32)
- data = data_torch.squeeze().numpy()
+ # use the first number-like part of the tensor name as the block id
+ bid = None
+ for part in name.split("."):
+ if part.isdecimal():
+ bid = int(part)
+ break
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)):
+ data: np.ndarray = data # type hint
+ n_dims = len(data.shape)
+ data_dtype = data.dtype
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ # if f32 desired, convert any float16 to float32
+ if self.ftype == 0 and data_dtype == np.float16:
+ data = data.astype(np.float32)
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ # when both are True, f32 should win
+ extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims)
+ extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims)
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
+ # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
+ extra_f32 = extra_f32 or n_dims == 1 or new_name.endswith("_norm.weight")
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ # if f16 desired, convert any float32 2-dim weight tensors to float16
+ extra_f16 = extra_f16 or (name.endswith(".weight") and n_dims >= 2)
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ # when both extra_f32 and extra_f16 are False, convert to float32 by default
+ if self.ftype == 1 and data_dtype == np.float16 and (extra_f32 or not extra_f16):
+ data = data.astype(np.float32)
- self.gguf_writer.add_tensor(new_name, data)
+ if self.ftype == 1 and data_dtype == np.float32 and extra_f16 and not extra_f32:
+ data = data.astype(np.float16)
+
+ # reverse shape to make it similar to the internal ggml dimension order
+ shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}"
+
+ # n_dims is implicit in the shape
+ logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data.dtype}, shape = {shape_str}")
+
+ self.gguf_writer.add_tensor(new_name, data)
def write(self):
self.write_tensors()
self.gguf_writer.write_header_to_file()
self.gguf_writer.write_kv_data_to_file()
- self.gguf_writer.write_tensors_to_file()
+ self.gguf_writer.write_tensors_to_file(progress=True)
self.gguf_writer.close()
def write_vocab(self):
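The conversion rules in the reworked `write_tensors` above are easier to read as a single decision function; this sketch restates them in isolation (the `target_dtype` helper is illustrative, preserving the f32-wins-over-f16 tie-break):

```python
import numpy as np


def target_dtype(ftype: int, dtype, n_dims: int, name: str, new_name: str,
                 extra_f32: bool, extra_f16: bool):
    # illustrative restatement of the rules in write_tensors
    extra_f32 = extra_f32 or n_dims == 1 or new_name.endswith("_norm.weight")
    extra_f16 = extra_f16 or (name.endswith(".weight") and n_dims >= 2)
    if ftype == 0 and dtype == np.float16:
        return np.float32  # f32 output: upcast any f16
    if ftype == 1 and dtype == np.float16 and (extra_f32 or not extra_f16):
        return np.float32  # f16 output, but this tensor must stay f32
    if ftype == 1 and dtype == np.float32 and extra_f16 and not extra_f32:
        return np.float16  # f16 output: downcast eligible 2D+ weights
    return dtype  # leave everything else untouched
```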
@@ -186,16 +282,18 @@ class Model(ABC):
self.gguf_writer.close()
@staticmethod
- def count_model_parts(dir_model: Path, prefix: str) -> int:
- num_parts = 0
+ def get_model_part_names(dir_model: Path, suffix: str) -> list[str]:
+ part_names: list[str] = []
for filename in os.listdir(dir_model):
- if filename.endswith(prefix):
- num_parts += 1
+ if filename.endswith(suffix):
+ part_names.append(filename)
- return num_parts
+ part_names.sort()
+
+ return part_names
@staticmethod
- def load_hparams(dir_model):
+ def load_hparams(dir_model: Path):
with open(dir_model / "config.json", "r", encoding="utf-8") as f:
return json.load(f)
@@ -203,32 +301,19 @@ class Model(ABC):
def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
assert names
- def func(modelcls: type[Model]):
+ def func(modelcls: AnyModel) -> AnyModel:
for name in names:
cls._model_classes[name] = modelcls
return modelcls
return func
@classmethod
- def from_model_architecture(cls, arch):
+ def from_model_architecture(cls, arch: str) -> type[Model]:
try:
return cls._model_classes[arch]
except KeyError:
raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
- def _is_model_safetensors(self) -> bool:
- return Model.count_model_parts(self.dir_model, ".safetensors") > 0
-
- def _get_part_names(self):
- if self.is_safetensors:
- if self.num_parts == 1: # there's only one .safetensors file
- return ("model.safetensors",)
- return (f"model-{n:05}-of-{self.num_parts:05}.safetensors" for n in range(1, self.num_parts + 1))
-
- if self.num_parts == 1: # there's only one .bin file
- return ("pytorch_model.bin",)
- return (f"pytorch_model-{n:05}-of-{self.num_parts:05}.bin" for n in range(1, self.num_parts + 1))
-
# used for GPT-2 BPE and WordPiece vocabs
def get_vocab_base(self) -> tuple[list[str], list[int], str]:
tokens: list[str] = []
@@ -274,8 +359,8 @@ class Model(ABC):
chktok = tokenizer.encode(chktxt)
chkhsh = sha256(str(chktok).encode()).hexdigest()
- print(f"chktok: {chktok}")
- print(f"chkhsh: {chkhsh}")
+ logger.debug(f"chktok: {chktok}")
+ logger.debug(f"chkhsh: {chkhsh}")
res = None
@@ -306,24 +391,39 @@ class Model(ABC):
if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
# ref: https://huggingface.co/openai-community/gpt2
res = "gpt-2"
+ if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
+ # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
+ res = "refact"
+ if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
+ # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
+ res = "command-r"
+ if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
+ # ref: https://huggingface.co/Qwen/Qwen1.5-7B
+ res = "qwen2"
+ if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
+ # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
+ res = "olmo"
+ if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
+ # ref: https://huggingface.co/databricks/dbrx-instruct
+ res = "dbrx"
if res is None:
- print("\n")
- print("**************************************************************************************")
- print("** WARNING: The BPE pre-tokenizer was not recognized!")
- print("** There are 2 possible reasons for this:")
- print("** - the model has not been added to convert-hf-to-gguf-update.py yet")
- print("** - the pre-tokenization config has changed upstream")
- print("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
- print("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
- print("**")
- print(f"** chkhsh: {chkhsh}")
- print("**************************************************************************************")
- print("\n")
+ logger.warning("\n")
+ logger.warning("**************************************************************************************")
+ logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
+ logger.warning("** There are 2 possible reasons for this:")
+ logger.warning("** - the model has not been added to convert-hf-to-gguf-update.py yet")
+ logger.warning("** - the pre-tokenization config has changed upstream")
+ logger.warning("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
+ logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
+ logger.warning("**")
+ logger.warning(f"** chkhsh: {chkhsh}")
+ logger.warning("**************************************************************************************")
+ logger.warning("\n")
raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")
- print(f"tokenizer.ggml.pre: {res}")
- print(f"chkhsh: {chkhsh}")
+ logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
+ logger.debug(f"chkhsh: {chkhsh}")
return res
@@ -403,22 +503,24 @@ class Model(ABC):
if not tokenizer_path.is_file():
raise FileNotFoundError(f"File not found: {tokenizer_path}")
- tokenizer = SentencePieceProcessor(str(tokenizer_path))
+ tokenizer = SentencePieceProcessor()
+ tokenizer.LoadFromFile(str(tokenizer_path))
+
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
for token_id in range(tokenizer.vocab_size()):
- piece = tokenizer.id_to_piece(token_id)
+ piece = tokenizer.IdToPiece(token_id)
text = piece.encode("utf-8")
- score = tokenizer.get_score(token_id)
+ score = tokenizer.GetScore(token_id)
toktype = SentencePieceTokenTypes.NORMAL
- if tokenizer.is_unknown(token_id):
+ if tokenizer.IsUnknown(token_id):
toktype = SentencePieceTokenTypes.UNKNOWN
- elif tokenizer.is_control(token_id):
+ elif tokenizer.IsControl(token_id):
toktype = SentencePieceTokenTypes.CONTROL
- elif tokenizer.is_unused(token_id):
+ elif tokenizer.IsUnused(token_id):
toktype = SentencePieceTokenTypes.UNUSED
- elif tokenizer.is_byte(token_id):
+ elif tokenizer.IsByte(token_id):
toktype = SentencePieceTokenTypes.BYTE
tokens.append(text)
@@ -439,11 +541,9 @@ class Model(ABC):
if vocab_size > len(tokens):
pad_count = vocab_size - len(tokens)
- print(
- f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]"
- )
+ logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
for i in range(1, pad_count + 1):
- tokens.append(f"[PAD{i}]")
+ tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
scores.append(-1000.0)
toktypes.append(SentencePieceTokenTypes.UNUSED)
@@ -518,82 +618,52 @@ class BloomModel(Model):
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams["n_layer"]
- tensors = dict(self.get_tensors())
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- has_lm_head = True
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
- for name, data_torch in tensors.items():
- if "lm_head.weight" not in tensors.keys() and "output.weight" not in tensors.keys():
- has_lm_head = False
+ name = re.sub(r'transformer\.', '', name)
- name = re.sub(r'transformer\.', '', name)
+ tensors: list[tuple[str, Tensor]] = []
- old_dtype = data_torch.dtype
+ if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
+ # Map bloom-style qkv_linear to gpt-style qkv_linear
+ # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
+ # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
+ qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
+ data_torch = torch.cat(
+ (
+ qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
+ qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
+ qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
+ ),
+ dim=0,
+ )
+ logger.info("re-format attention.linear_qkv.weight")
+ elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
+ qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
+ data_torch = torch.cat(
+ (
+ qkv_bias[:, 0, :].reshape((n_embed,)),
+ qkv_bias[:, 1, :].reshape((n_embed,)),
+ qkv_bias[:, 2, :].reshape((n_embed,)),
+ ),
+ dim=0,
+ )
+ logger.info("re-format attention.linear_qkv.bias")
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ tensors.append((self.map_tensor_name(name), data_torch))
- data = data_torch.squeeze().numpy()
+ if name == "word_embeddings.weight":
+ assert self.tensor_names is not None
- if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
- # Map bloom-style qkv_linear to gpt-style qkv_linear
- # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
- # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
- qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed))
- data = np.concatenate(
- (
- qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
- qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
- qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
- ),
- axis=0,
- )
- print("re-format attention.linear_qkv.weight")
- elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
- qkv_bias = data.reshape((n_head, 3, n_embed // n_head))
- data = np.concatenate(
- (
- qkv_bias[:, 0, :].reshape((n_embed,)),
- qkv_bias[:, 1, :].reshape((n_embed,)),
- qkv_bias[:, 2, :].reshape((n_embed,)),
- ),
- axis=0,
- )
- print("re-format attention.linear_qkv.bias")
+ # TODO: tie them at runtime, don't duplicate in the model file
+ if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- if not has_lm_head and name == "word_embeddings.weight":
- self.gguf_writer.add_tensor("output.weight", data)
- print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+ return tensors
@Model.register("MPTForCausalLM")
@@ -629,51 +699,16 @@ class MPTModel(Model):
else:
self.gguf_writer.add_max_alibi_bias(0.0)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers"))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- old_dtype = data_torch.dtype
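+ # rename "scales" tensors to "act.scales" to match the gguf tensor naming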
+ if "scales" in name:
+ new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
+ new_name = new_name.replace("scales", "act.scales")
+ else:
+ new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- if "scales" in name:
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales"))
- if new_name is not None:
- new_name = new_name.replace("scales", "act.scales")
- else:
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(new_name, data_torch)]
@Model.register("OrionForCausalLM")
@@ -697,8 +732,7 @@ class OrionModel(Model):
elif "model_max_length" in self.hparams:
ctx_length = self.hparams["model_max_length"]
else:
- print("gguf: can not find ctx length parameter.")
- sys.exit()
+ raise ValueError("gguf: cannot find ctx length parameter.")
self.gguf_writer.add_file_type(self.ftype)
self.gguf_writer.add_name(self.dir_model.name)
@@ -714,49 +748,6 @@ class OrionModel(Model):
# ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
- def write_tensors(self):
- # Collect tensors from generator object
- model_kv = dict(self.get_tensors())
- block_count = self.hparams["num_hidden_layers"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
-
@Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
class BaichuanModel(Model):
@@ -779,8 +770,7 @@ class BaichuanModel(Model):
elif "model_max_length" in self.hparams:
ctx_length = self.hparams["model_max_length"]
else:
- print("gguf: can not find ctx length parameter.")
- sys.exit()
+ raise ValueError("gguf: cannot find ctx length parameter.")
self.gguf_writer.add_name(self.dir_model.name)
self.gguf_writer.add_source_hf_repo(hf_repo)
@@ -799,61 +789,26 @@ class BaichuanModel(Model):
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
- def write_tensors(self):
- # Collect tensors from generator object
- model_kv = dict(self.get_tensors())
- block_count = self.hparams["num_hidden_layers"]
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
head_count = self.hparams["num_attention_heads"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
head_count_kv = self.hparams.get("num_key_value_heads", head_count)
- for i in range(block_count):
- if (w := model_kv.get(f"model.layers.{i}.self_attn.W_pack.weight")) is not None:
- print(f"Unpacking and permuting layer {i}")
- model_kv[f"model.layers.{i}.self_attn.q_proj.weight"] = \
- self._reverse_hf_permute_part(w, 0, head_count, head_count)
- model_kv[f"model.layers.{i}.self_attn.k_proj.weight"] = \
- self._reverse_hf_permute_part(w, 1, head_count, head_count_kv)
- model_kv[f"model.layers.{i}.self_attn.v_proj.weight"] = \
- self._reverse_hf_part(w, 2)
- del model_kv[f"model.layers.{i}.self_attn.W_pack.weight"]
+ tensors: list[tuple[str, Tensor]] = []
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
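+ # W_pack is a fused QKV projection; unpack it into separate q, k and v tensors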
+ if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
+ logger.info(f"Unpacking and permuting layer {bid}")
+ tensors = [
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
+ self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
+ self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
+ self._reverse_hf_part(data_torch, 2)),
+ ]
+ else:
+ tensors = [(self.map_tensor_name(name), data_torch)]
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
+ return tensors
def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
if n_kv_head is not None and n_head != n_kv_head:
@@ -885,7 +840,7 @@ class XverseModel(Model):
dir_model = self.dir_model
hparams = self.hparams
- tokens: list[bytearray] = []
+ tokens: list[bytes] = []
toktypes: list[int] = []
from transformers import AutoTokenizer
@@ -893,7 +848,7 @@ class XverseModel(Model):
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
assert max(tokenizer.vocab.values()) < vocab_size
- reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
+ reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
added_vocab = tokenizer.get_added_vocab()
for token_id in range(vocab_size):
@@ -937,8 +892,7 @@ class XverseModel(Model):
elif "model_max_length" in self.hparams:
ctx_length = self.hparams["model_max_length"]
else:
- print("gguf: can not find ctx length parameter.")
- sys.exit()
+ raise ValueError("gguf: cannot find ctx length parameter.")
self.gguf_writer.add_name(self.dir_model.name)
self.gguf_writer.add_source_hf_repo(hf_repo)
@@ -957,56 +911,19 @@ class XverseModel(Model):
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
- def write_tensors(self):
- # Collect tensors from generator object
- model_kv = dict(self.get_tensors())
- block_count = self.hparams["num_hidden_layers"]
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
head_count = self.hparams["num_attention_heads"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
head_count_kv = self.hparams.get("num_key_value_heads", head_count)
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
+ # HF models permute some of the tensors, so we need to undo that
+ if name.endswith("q_proj.weight"):
+ data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
+ if name.endswith("k_proj.weight"):
+ data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # HF models permute some of the tensors, so we need to undo that
- if name.endswith(("q_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
- if name.endswith(("k_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
if n_kv_head is not None and n_head != n_kv_head:
@@ -1047,72 +964,31 @@ class FalconModel(Model):
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams.get("num_hidden_layers")
- if block_count is None:
- block_count = self.hparams["n_layer"] # old name
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- n_head = self.hparams.get("num_attention_heads")
- if n_head is None:
- n_head = self.hparams["n_head"] # old name
+ # QKV tensor transform
+ # The original query_key_value tensor contains n_head_kv "kv groups",
+ # each consisting of n_head/n_head_kv query weights followed by one key
+ # and one value weight (shared by all query heads in the kv group).
+ # This layout makes it a big pain to work with in GGML.
+ # So we rearrange them here, so that we have n_head query weights
+ # followed by n_head_kv key weights followed by n_head_kv value weights,
+ # in contiguous fashion.
+ # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
- n_head_kv = self.hparams.get("num_kv_heads")
- if n_head_kv is None:
- n_head_kv = self.hparams.get("n_head_kv", 1) # old name
+ if "query_key_value" in name:
+ n_head = self.find_hparam(["num_attention_heads", "n_head"])
+ n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
+ head_dim = self.hparams["hidden_size"] // n_head
- head_dim = self.hparams["hidden_size"] // n_head
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
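+ # each kv group holds n_head/n_head_kv query heads followed by one key and one value head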
+ q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
+ k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
+ v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
+ data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
- for name, data_torch in self.get_tensors():
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # QKV tensor transform
- # The original query_key_value tensor contains n_head_kv "kv groups",
- # each consisting of n_head/n_head_kv query weights followed by one key
- # and one value weight (shared by all query heads in the kv group).
- # This layout makes it a big pain to work with in GGML.
- # So we rearrange them here,, so that we have n_head query weights
- # followed by n_head_kv key weights followed by n_head_kv value weights,
- # in contiguous fashion.
- # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
-
- if "query_key_value" in name:
- qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
- q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
- k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
- v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
- data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("GPTBigCodeForCausalLM")
@@ -1158,7 +1034,7 @@ class RefactModel(Model):
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
hidden_dim = self.hparams["n_embd"]
inner_dim = 4 * hidden_dim
hidden_dim = int(2 * inner_dim / 3)
@@ -1167,57 +1043,23 @@ class RefactModel(Model):
n_head = self.hparams["n_head"]
n_head_kv = 1
head_dim = self.hparams["n_embd"] // n_head
- block_count = self.hparams["n_layer"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ tensors: list[tuple[str, Tensor]] = []
- tensors = dict(self.get_tensors())
- for i in range(block_count):
- if (w := tensors.get(f"transformer.h.{i}.attn.kv.weight")) is not None:
- tensors[f"model.layers.{i}.self_attn.k_proj.weight"] = w[:n_head_kv * head_dim]
- tensors[f"model.layers.{i}.self_attn.v_proj.weight"] = w[n_head_kv * head_dim:]
- del tensors[f"transformer.h.{i}.attn.kv.weight"]
- if (w := tensors.get(f"transformer.h.{i}.attn.q.weight")) is not None:
- tensors[f"model.layers.{i}.self_attn.q_proj.weight"] = w
- del tensors[f"transformer.h.{i}.attn.q.weight"]
- if (w := tensors.get(f"transformer.h.{i}.mlp.gate_up_proj.weight")) is not None:
- tensors[f"model.layers.{i}.mlp.gate_proj.weight"] = w[:ff_dim]
- tensors[f"model.layers.{i}.mlp.up_proj.weight"] = w[ff_dim:]
- del tensors[f"transformer.h.{i}.mlp.gate_up_proj.weight"]
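+ # split the fused kv and gate_up projections into separate gguf tensors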
+ if bid is not None:
+ if name == f"transformer.h.{bid}.attn.kv.weight":
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
+ elif name == f"transformer.h.{bid}.attn.q.weight":
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
+ elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))
- for name, data_torch in tensors.items():
- old_dtype = data_torch.dtype
+ if len(tensors) == 0:
+ tensors.append((self.map_tensor_name(name), data_torch))
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight",))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return tensors
@Model.register("PersimmonForCausalLM")
@@ -1252,23 +1094,11 @@ class PersimmonModel(Model):
# self.gguf_writer.add_bos_token_id(71013)
# self.gguf_writer.add_eos_token_id(71013)
- def write_tensors(self):
- block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers"))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid, n_dims # unused
- for name, data_torch in self.get_tensors():
- if name.endswith(".self_attention.rotary_emb.inv_freq"):
- continue
- old_dtype = data_torch.dtype
- # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
- data = data_torch.to(torch.float32).squeeze().numpy()
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
- n_dims = len(data.shape)
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
+ # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
+ return True
@Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
@@ -1298,86 +1128,67 @@ class StableLMModel(Model):
self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
+ _q_norms: list[dict[str, Tensor]] | None = None
+ _k_norms: list[dict[str, Tensor]] | None = None
+
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ n_head = self.hparams["num_attention_heads"]
+ n_kv_head = self.hparams["num_key_value_heads"]
+
+ if name.find("q_layernorm.norms") != -1:
+ assert bid is not None
+
+ if self._q_norms is None:
+ self._q_norms = [{} for _ in range(self.block_count)]
+
+ self._q_norms[bid][name] = data_torch
+
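+ # once every per-head q norm of this block has been collected, stack them into one tensor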
+ if len(self._q_norms[bid]) >= n_head:
+ return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
+ else:
+ return []
+
+ if name.find("k_layernorm.norms") != -1:
+ assert bid is not None
+
+ if self._k_norms is None:
+ self._k_norms = [{} for _ in range(self.block_count)]
+
+ self._k_norms[bid][name] = data_torch
+
+ if len(self._k_norms[bid]) >= n_kv_head:
+ return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
+ else:
+ return []
+
+ return [(self.map_tensor_name(name), data_torch)]
+
+ def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
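+ # stack the n_head per-head norm weights into a single tensor, one row per head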
+ datas: list[Tensor] = []
+ # extract the norms in order
+ for xid in range(n_head):
+ ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
+ datas.append(norms[ename])
+ del norms[ename]
+ data_torch = torch.stack(datas, dim=0)
+
+ merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
+ new_name = self.map_tensor_name(merged_name)
+
+ return [(new_name, data_torch)]
+
def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
- n_kv_head = self.hparams.get("num_key_value_heads")
- q_norms = dict()
- k_norms = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
+ super().write_tensors()
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
- n_dims = len(data.shape)
- if name.find("q_layernorm.norms") != -1:
- q_norms[name] = data
- if len(q_norms) >= (block_count * n_head):
- self._stack_qk_norm(block_count, name, tensor_map, n_head, q_norms, n_dims, layer_name="q_layernorm")
- continue
- if name.find("k_layernorm.norms") != -1:
- k_norms[name] = data
- if len(k_norms) >= (block_count * n_kv_head):
- self._stack_qk_norm(block_count, name, tensor_map, n_kv_head, k_norms, n_dims, layer_name="k_layernorm")
- continue
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- def _stack_qk_norm(self, block_count, name, tensor_map, n_head, norms, n_dims, layer_name="q_layernorm"):
- for bid in range(block_count):
- datas = []
- for xid in range(n_head):
- ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
- datas.append(norms[ename])
- del norms[ename]
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
- merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ if self._q_norms is not None or self._k_norms is not None:
+ # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
+ norms = (
+ [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
+ ) + (
+ [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
+ )
+ if len(norms) > 0:
+ raise ValueError(f"Unprocessed norms: {norms}")
@Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
@@ -1417,104 +1228,69 @@ class LlamaModel(Model):
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
- # Same as super class, but permuting q_proj, k_proj
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
+ @staticmethod
+ def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
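+ # regroup each head's rows into (2, rows_per_head/2), swap the axes and flatten back, reversing the HF rope permutation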
+ if n_head_kv is not None and n_head != n_head_kv:
+ n_head = n_head_kv
+ return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
+ .swapaxes(1, 2)
+ .reshape(weights.shape))
+
+ _experts: list[dict[str, Tensor]] | None = None
+
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ n_head = self.hparams["num_attention_heads"]
n_kv_head = self.hparams.get("num_key_value_heads")
- n_experts = self.hparams.get("num_local_experts")
- experts = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
- old_dtype = data_torch.dtype
+ if name.endswith("q_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_head)
+ if name.endswith("k_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ # process the experts separately
+ if name.find("block_sparse_moe.experts") != -1:
+ n_experts = self.hparams["num_local_experts"]
- data = data_torch.numpy()
+ assert bid is not None
- if name.endswith("q_proj.weight"):
- data = permute(data, n_head, n_head)
- if name.endswith("k_proj.weight"):
- data = permute(data, n_head, n_kv_head)
+ if self._experts is None:
+ self._experts = [{} for _ in range(self.block_count)]
- data = data.squeeze()
+ self._experts[bid][name] = data_torch
- # process the experts separately
- if name.find("block_sparse_moe.experts") != -1:
- experts[name] = data
- if len(experts) >= n_experts:
- # merge the experts into a single 3d tensor
- for bid in range(block_count):
- for wid in range(1, 4):
- full = True
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
- if ename not in experts:
- full = False
- break
- if not full:
- continue
+ if len(self._experts[bid]) >= n_experts * 3:
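+ # w1, w2 and w3 have arrived for every expert in this block; merge them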
+ tensors: list[tuple[str, Tensor]] = []
- datas = []
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
- datas.append(experts[ename])
- del experts[ename]
+ # merge the experts into a single 3d tensor
+ for wid in ["w1", "w2", "w3"]:
+ datas: list[Tensor] = []
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
+ for xid in range(n_experts):
+ ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
+ datas.append(self._experts[bid][ename])
+ del self._experts[bid][ename]
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ data_torch = torch.stack(datas, dim=0)
- if self.ftype == 1 and data_dtype == np.float32:
- data = data.astype(np.float16)
+ merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"
- merged_name = f"layers.{bid}.feed_forward.experts.w{wid}.weight"
+ new_name = self.map_tensor_name(merged_name)
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ tensors.append((new_name, data_torch))
+ return tensors
+ else:
+ return []
- print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+ return [(self.map_tensor_name(name), data_torch)]
- self.gguf_writer.add_tensor(new_name, data)
- continue
+ def write_tensors(self):
+ super().write_tensors()
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # 1d tensors need to be converted to float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- if len(experts) > 0:
- raise ValueError(f"Unprocessed experts: {experts.keys()}")
+ if self._experts is not None:
+ # flatten `list[dict[str, Tensor]]` into `list[str]`
+ experts = [k for d in self._experts for k in d.keys()]
+ if len(experts) > 0:
+ raise ValueError(f"Unprocessed experts: {experts}")
@Model.register("GrokForCausalLM")
@@ -1531,91 +1307,44 @@ class GrokModel(Model):
super().set_gguf_parameters()
self.gguf_writer.add_name("Grok")
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_experts = self.hparams.get("num_local_experts")
- experts = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
+ _experts: list[dict[str, Tensor]] | None = None
- old_dtype = data_torch.dtype
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ # process the experts separately
+ if name.find(".moe.") != -1:
+ n_experts = self.hparams["num_local_experts"]
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ assert bid is not None
- data = data_torch.squeeze().numpy()
+ if self._experts is None:
+ self._experts = [{} for _ in range(self.block_count)]
- # process the experts separately
- if name.find(".moe.") != -1:
- experts[name] = data
- if len(experts) >= n_experts:
- # merge the experts into a single 3d tensor
- for bid in range(block_count):
- for wid in ["linear", "linear_1", "linear_v"]:
- full = True
- for xid in range(n_experts):
- ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
- if ename not in experts:
- full = False
- break
- if not full:
- continue
+ self._experts[bid][name] = data_torch
- datas = []
- for xid in range(n_experts):
- ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
- datas.append(experts[ename])
- del experts[ename]
+ if len(self._experts[bid]) >= n_experts * 3:
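+ # linear, linear_1 and linear_v have arrived for every expert in this block; merge them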
+ tensors: list[tuple[str, Tensor]] = []
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
+ # merge the experts into a single 3d tensor
+ for wid in ["linear", "linear_1", "linear_v"]:
+ datas: list[Tensor] = []
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ for xid in range(n_experts):
+ ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
+ datas.append(self._experts[bid][ename])
+ del self._experts[bid][ename]
- if self.ftype == 1 and data_dtype == np.float32:
- data = data.astype(np.float16)
+ data_torch = torch.stack(datas, dim=0)
- merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
+ merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ new_name = self.map_tensor_name(merged_name)
- print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+ tensors.append((new_name, data_torch))
+ return tensors
+ else:
+ return []
- self.gguf_writer.add_tensor(new_name, data)
- continue
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("DbrxForCausalLM")
@@ -1646,72 +1375,47 @@ class DbrxModel(Model):
self.gguf_writer.add_layer_norm_eps(1e-5)
self.gguf_writer.add_file_type(self.ftype)
- print(f"gguf: file type = {self.ftype}")
+ logger.info(f"gguf: file type = {self.ftype}")
- def write_tensors(self):
- block_count = self.hparams.get("n_layers")
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- for name, data_torch in self.get_tensors():
- n_expert = self.hparams["ffn_config"]["moe_num_experts"]
- n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
- n_embd = self.hparams["d_model"]
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
- # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
- # But llama.cpp moe graph works differently
- # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
- # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
- exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
- "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
- "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
- experts = False
- for exp_tensor_name in exp_tensor_names.keys():
- if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
- experts = True
- data_torch = data_torch.view(n_expert, n_ff, n_embd)
- if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
- data_torch = data_torch.permute(*permute_tensor)
- break
+ n_expert = self.hparams["ffn_config"]["moe_num_experts"]
+ n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
+ n_embd = self.hparams["d_model"]
- old_dtype = data_torch.dtype
+ # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
+ # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
+ # But llama.cpp moe graph works differently
+ # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
+ # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
+ exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
+ "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
+ "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
+ experts = False
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ for exp_tensor_name in exp_tensor_names.keys():
+ if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
+ experts = True
+ data_torch = data_torch.view(n_expert, n_ff, n_embd)
+ if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
+ data_torch = data_torch.permute(*permute_tensor)
+ break
- data = data_torch.squeeze().numpy()
+ # map tensor names
+ # In MoE models the ffn tensors are typically most of the model weights,
+ # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
+ # Every other model has the weight names ending in .weight,
+ # let's assume that is the convention, which is not the case for dbrx:
+ # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
+ new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
- # map tensor names
- # In MoE models the ffn tensors are typically most of the model weights,
- # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
- # Every other model has the weight names ending in .weight,
- # let's assume that is the convention which is not the case for dbrx:
- # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
- new_name = tensor_map.get_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ return [(new_name, data_torch)]
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid # unused
- # Most of the codebase that takes in 1D tensors only handles F32 tensors
- # and most of the outputs tensors are F32.
- if data_dtype != np.float32 and n_dims == 1:
- print(f"Can not map tensor {name!r}: all 1D tensors must be F32")
- sys.exit()
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and n_dims > 1:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
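+ # allow F16 for any tensor with more than one dimension; 1D tensors remain F32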
+ return n_dims > 1
@Model.register("MiniCPMForCausalLM")
@@ -1744,54 +1448,19 @@ class MiniCPMModel(Model):
.reshape(weights.shape)
)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
+ n_head = self.hparams["num_attention_heads"]
n_kv_head = self.hparams.get("num_key_value_heads")
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
- old_dtype = data_torch.dtype
+ # HF models permute some of the tensors, so we need to undo that
+ if name.endswith("q_proj.weight"):
+ data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
+ if name.endswith("k_proj.weight"):
+ data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # HF models permute some of the tensors, so we need to undo that
- if name.endswith(("q_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
- if name.endswith(("k_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("QWenLMHeadModel")
@@ -1835,47 +1504,6 @@ class QwenModel(Model):
self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
- def write_tensors(self):
- block_count = self.hparams["num_hidden_layers"]
- model_kv = dict(self.get_tensors())
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
-
@Model.register("Qwen2ForCausalLM")
class Qwen2Model(Model):
@@ -1897,94 +1525,52 @@ class Qwen2MoeModel(Model):
if (n_experts := self.hparams.get("num_experts")) is not None:
self.gguf_writer.add_expert_count(n_experts)
+ _experts: list[dict[str, Tensor]] | None = None
+
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ # process the experts separately
+ if name.find("experts") != -1:
+ n_experts = self.hparams["num_experts"]
+ assert bid is not None
+
+ if self._experts is None:
+ self._experts = [{} for _ in range(self.block_count)]
+
+ self._experts[bid][name] = data_torch
+
+ if len(self._experts[bid]) >= n_experts * 3:
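+ # down_proj, gate_proj and up_proj have arrived for every expert in this block; merge them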
+ tensors: list[tuple[str, Tensor]] = []
+
+ # merge the experts into a single 3d tensor
+ for w_name in ["down_proj", "gate_proj", "up_proj"]:
+ datas: list[Tensor] = []
+
+ for xid in range(n_experts):
+ ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+ datas.append(self._experts[bid][ename])
+ del self._experts[bid][ename]
+
+ data_torch = torch.stack(datas, dim=0)
+
+ merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+
+ new_name = self.map_tensor_name(merged_name)
+
+ tensors.append((new_name, data_torch))
+ return tensors
+ else:
+ return []
+
+ return [(self.map_tensor_name(name), data_torch)]
+
def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_experts = self.hparams.get("num_experts")
- experts = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
+ super().write_tensors()
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # process the experts separately
- if name.find("experts") != -1:
- experts[name] = data
- if len(experts) >= n_experts * 3:
- # merge the experts into a single 3d tensor
- for bid in range(block_count):
- for w_name in ["down_proj", "gate_proj", "up_proj"]:
- full = True
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
- if ename not in experts:
- full = False
- break
- if not full:
- continue
-
- datas = []
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
- datas.append(experts[ename])
- del experts[ename]
-
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
-
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- if self.ftype == 1 and data_dtype == np.float32:
- data = data.astype(np.float16)
-
- merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
-
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
- continue
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- if len(experts) > 0:
- raise ValueError(f"Unprocessed experts: {experts.keys()}")
+ if self._experts is not None:
+ # flatten `list[dict[str, Tensor]]` into `list[str]`
+ experts = [k for d in self._experts for k in d.keys()]
+ if len(experts) > 0:
+ raise ValueError(f"Unprocessed experts: {experts}")
@Model.register("GPT2LMHeadModel")
@@ -2001,55 +1587,27 @@ class GPT2Model(Model):
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias", ".attn.masked_bias")):
- continue
+ tensors: list[tuple[str, Tensor]] = []
- if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")):
- data_torch = data_torch.transpose(1, 0)
+ # we don't need these
+ if name.endswith((".attn.bias", ".attn.masked_bias")):
+ return tensors
- old_dtype = data_torch.dtype
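+ # HF GPT-2 uses Conv1D modules, whose weights are stored transposed relative to nn.Linear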
+ if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
+ data_torch = data_torch.transpose(1, 0)
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ new_name = self.map_tensor_name(name)
- data = data_torch.squeeze().numpy()
+ tensors.append((new_name, data_torch))
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ # note: GPT2 output is tied to (same as) wte in original model
+ if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- # note: GPT2 output is tied to (same as) wte in original model
- if new_name == "token_embd.weight":
- print(f"output.weight, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor("output.weight", data)
+ return tensors
@Model.register("PhiForCausalLM")
@@ -2087,10 +1645,10 @@ class Phi3MiniModel(Model):
tokenizer_path = self.dir_model / 'tokenizer.model'
if not tokenizer_path.is_file():
- print(f'Error: Missing {tokenizer_path}', file=sys.stderr)
- sys.exit(1)
+ raise ValueError(f'Error: Missing {tokenizer_path}')
- tokenizer = SentencePieceProcessor(str(tokenizer_path))
+ tokenizer = SentencePieceProcessor()
+ tokenizer.LoadFromFile(str(tokenizer_path))
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
@@ -2100,18 +1658,18 @@ class Phi3MiniModel(Model):
for token_id in range(tokenizer.vocab_size()):
- piece = tokenizer.id_to_piece(token_id)
+ piece = tokenizer.IdToPiece(token_id)
text = piece.encode("utf-8")
- score = tokenizer.get_score(token_id)
+ score = tokenizer.GetScore(token_id)
toktype = SentencePieceTokenTypes.NORMAL
- if tokenizer.is_unknown(token_id):
+ if tokenizer.IsUnknown(token_id):
toktype = SentencePieceTokenTypes.UNKNOWN
- elif tokenizer.is_control(token_id):
+ elif tokenizer.IsControl(token_id):
toktype = SentencePieceTokenTypes.CONTROL
- elif tokenizer.is_unused(token_id):
+ elif tokenizer.IsUnused(token_id):
toktype = SentencePieceTokenTypes.UNUSED
- elif tokenizer.is_byte(token_id):
+ elif tokenizer.IsByte(token_id):
toktype = SentencePieceTokenTypes.BYTE
tokens[token_id] = text
@@ -2126,7 +1684,7 @@ class Phi3MiniModel(Model):
for key in added_tokens_json:
token_id = added_tokens_json[key]
if (token_id >= vocab_size):
- print(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
+ logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
continue
tokens[token_id] = key.encode("utf-8")
@@ -2197,52 +1755,18 @@ class PlamoModel(Model):
data_torch = torch.reshape(data_torch, (5120, 5120))
return data_torch
- def write_tensors(self):
- block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers"))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- for name, data_torch in self.get_tensors():
- if "self_attn.rotary_emb.inv_freq" in name:
- continue
+ new_name = self.map_tensor_name(name)
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ # shuffle for broadcasting of gqa in ggml_mul_mat
+ if new_name.endswith("attn_q.weight"):
+ data_torch = self.shuffle_attn_q_weight(data_torch)
+ elif new_name.endswith("attn_output.weight"):
+ data_torch = self.shuffle_attn_output_weight(data_torch)
- # shuffle for broadcasting of gqa in ggml_mul_mat
- if new_name.endswith("attn_q.weight"):
- data_torch = self.shuffle_attn_q_weight(data_torch)
- elif new_name.endswith("attn_output.weight"):
- data_torch = self.shuffle_attn_output_weight(data_torch)
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(new_name, data_torch)]
@Model.register("CodeShellForCausalLM")
@@ -2265,52 +1789,21 @@ class CodeShellModel(Model):
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(1.0)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- tensors = dict(self.get_tensors())
- has_lm_head = "lm_head.weight" in tensors.keys() or "output.weight" in tensors.keys()
- for name, data_torch in tensors.items():
- # we don't need these
- if name.endswith((".attn.rotary_emb.inv_freq")):
- continue
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- old_dtype = data_torch.dtype
+ new_name = self.map_tensor_name(name)
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)]
- data = data_torch.squeeze().numpy()
+ if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
+ assert self.tensor_names is not None
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
+ # copy tok_embd.weight to output.weight
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- if not has_lm_head and name == "transformer.wte.weight":
- self.gguf_writer.add_tensor("output.weight", data)
- print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+ return tensors
@Model.register("InternLM2ForCausalLM")
@@ -2332,34 +1825,36 @@ class InternLM2Model(Model):
toktypes: list[int] = []
if not tokenizer_path.is_file():
- print(f'Error: Missing {tokenizer_path}', file=sys.stderr)
+ logger.error(f'Error: Missing {tokenizer_path}')
sys.exit(1)
sentencepiece_model = model.ModelProto()
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
- tokenizer = SentencePieceProcessor(str(tokenizer_path))
+ tokenizer = SentencePieceProcessor()
+ tokenizer.LoadFromFile(str(tokenizer_path))
+
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
for token_id in range(vocab_size):
- piece = tokenizer.id_to_piece(token_id)
+ piece = tokenizer.IdToPiece(token_id)
text = piece.encode("utf-8")
- score = tokenizer.get_score(token_id)
+ score = tokenizer.GetScore(token_id)
if text == b"\x00":
# (TODO): fixme
# Hack here and replace the \x00 characters.
- print(f"InternLM2 convert token '{text}' to '🐉'!")
- text = "🐉"
+ logger.warning(f"InternLM2 convert token '{text}' to '🐉'!")
+ text = "🐉".encode("utf-8")
toktype = SentencePieceTokenTypes.NORMAL
- if tokenizer.is_unknown(token_id):
+ if tokenizer.IsUnknown(token_id):
toktype = SentencePieceTokenTypes.UNKNOWN
- elif tokenizer.is_control(token_id):
+ elif tokenizer.IsControl(token_id):
toktype = SentencePieceTokenTypes.CONTROL
- elif tokenizer.is_unused(token_id):
+ elif tokenizer.IsUnused(token_id):
toktype = SentencePieceTokenTypes.UNUSED
- elif tokenizer.is_byte(token_id):
+ elif tokenizer.IsByte(token_id):
toktype = SentencePieceTokenTypes.BYTE
tokens.append(text)
@@ -2390,19 +1885,21 @@ class InternLM2Model(Model):
# TODO: this is a hack, should be fixed
# https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048
special_vocab.special_token_ids["eos"] = self._try_get_sft_eos(tokenizer)
- print(f"Replace eos:{old_eos} with a special token:{special_vocab.special_token_ids['eos']} \
+ logger.warning(f"Replace eos:{old_eos} with a special token:{special_vocab.special_token_ids['eos']} \
in chat mode so that the conversation can end normally.")
special_vocab.add_to_gguf(self.gguf_writer)
def _try_get_sft_eos(self, tokenizer):
- unused_145_list = tokenizer.encode('[UNUSED_TOKEN_145]')
- im_end_list = tokenizer.encode('<|im_end|>')
+ unused_145_list = tokenizer.Encode('[UNUSED_TOKEN_145]')
+ im_end_list = tokenizer.Encode('<|im_end|>')
+ eos_token = None
assert (len(unused_145_list) == 1) ^ (len(im_end_list) == 1)
if len(unused_145_list) == 1:
eos_token = unused_145_list[0]
if len(im_end_list) == 1:
eos_token = im_end_list[0]
+ assert eos_token is not None
return eos_token
def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int):
@@ -2423,72 +1920,36 @@ in chat mode so that the conversation can end normally.")
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
- def post_write_tensors(self, tensor_map, name, data_torch):
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
-
- def write_tensors(self):
- from einops import rearrange
-
- num_heads = self.hparams.get("num_attention_heads")
- num_kv_heads = self.hparams.get("num_key_value_heads")
- hidden_size = self.hparams.get("hidden_size")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ num_heads = self.hparams["num_attention_heads"]
+ num_kv_heads = self.hparams["num_key_value_heads"]
+ hidden_size = self.hparams["hidden_size"]
q_per_kv = num_heads // num_kv_heads
head_dim = hidden_size // num_heads
num_groups = num_heads // q_per_kv
- block_count = self.hparams["num_hidden_layers"]
- model_kv = dict(self.get_tensors())
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
qkv_pattern = r"model\.layers\.(\d+)\.attention\.wqkv"
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
- if re.match(qkv_pattern, name):
- bid = re.findall(qkv_pattern, name)[0]
- qkv = data_torch
- qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim)
- q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :]
- # The model weights of q and k equire additional reshape.
- q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads)
- k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads)
- v = rearrange(v, " o g n i -> o (g n i)").T
- self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wq.weight", q)
- self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wk.weight", k)
- self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wv.weight", v)
- else:
- self.post_write_tensors(tensor_map, name, data_torch)
+ if re.match(qkv_pattern, name):
+ bid = re.findall(qkv_pattern, name)[0]
+ qkv = data_torch
+ # qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim)
+ qkv = qkv.T.reshape((-1, num_groups, q_per_kv + 2, head_dim))
+ q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :]
+ # The model weights of q and k require additional reshape.
+ # q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads)
+ q = self._hf_permute_qk(q.reshape((q.shape[0], -1)).T, num_heads, num_heads)
+ # k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads)
+ k = self._hf_permute_qk(k.reshape((k.shape[0], -1)).T, num_heads, num_kv_heads)
+ # v = rearrange(v, " o g n i -> o (g n i)").T
+ v = v.reshape((v.shape[0], -1)).T
+ return [
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
+ ]
+ else:
+ return [(self.map_tensor_name(name), data_torch)]
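Dropping the `einops` dependency here relies on `rearrange` over the trailing axis being a plain row-major reshape. A quick self-check of that equivalence, with illustrative shapes (it imports `einops` only for the comparison):

```python
import torch
from einops import rearrange

o, g, n, i = 8, 2, 4, 16  # illustrative sizes, not from a real InternLM2 checkpoint
x = torch.randn(o, g * n * i)

# splitting the second axis
split_a = rearrange(x, "o (g n i) -> o g n i", g=g, n=n, i=i)
split_b = x.reshape((-1, g, n, i))  # row-major reshape splits the axis identically
assert torch.equal(split_a, split_b)

# merging it back
q = torch.randn(o, g, n, i)
merge_a = rearrange(q, "o g n i -> o (g n i)")
merge_b = q.reshape((q.shape[0], -1))
assert torch.equal(merge_a, merge_b)
```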
@Model.register("BertModel", "CamembertModel")
@@ -2553,44 +2014,20 @@ class BertModel(Model):
special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
special_vocab.add_to_gguf(self.gguf_writer)
- def write_tensors(self):
- tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
- tensors = dict(self.get_tensors())
- for name, data_torch in tensors.items():
- # we are only using BERT for embeddings so we don't need the pooling layer
- if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
- continue # we don't need these
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ # we are only using BERT for embeddings so we don't need the pooling layer
+ if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
+ return [] # we don't need these
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ return [(self.map_tensor_name(name), data_torch)]
- data = data_torch.squeeze().numpy()
- n_dims = len(data.shape)
- new_dtype: type[np.floating[Any]]
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del new_name, bid, n_dims # unused
- if (
- self.ftype == 1 and name.endswith(".weight") and n_dims == 2
- and name != "embeddings.token_type_embeddings.weight" # not used with get_rows, must be F32
- ):
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- new_dtype = np.float16
- else:
- # if f32 desired, convert any float16 to float32
- new_dtype = np.float32
-
- print(f"{new_name}, n_dims = {n_dims}, {data_torch.dtype} --> {new_dtype}")
-
- if data.dtype != new_dtype:
- data = data.astype(new_dtype)
-
- self.gguf_writer.add_tensor(new_name, data)
+ # not used with get_rows, must be F32
+ return name == "embeddings.token_type_embeddings.weight"
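`extra_f32_tensors` is the other half of the refactor: instead of each `write_tensors` re-implementing the f16/f32 policy inline, a model only vetoes the f16 down-conversion for tensors that must stay F32. Roughly how the base class is assumed to consult the hook (an illustrative sketch, not the actual base-class code):

```python
import numpy as np


def wants_f16(model, name: str, new_name: str, bid: int | None, data: np.ndarray) -> bool:
    # 2-dim float32 weights become f16 when --outtype f16 is requested,
    # unless the model vetoes it via extra_f32_tensors()
    return (
        model.ftype == 1
        and data.dtype == np.float32
        and data.ndim == 2
        and not model.extra_f32_tensors(name, new_name, bid, data.ndim)
    )
```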
@Model.register("NomicBertModel")
@@ -2656,46 +2093,20 @@ class GemmaModel(Model):
self.gguf_writer.add_value_length(hparams["head_dim"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- for name, data_torch in self.get_tensors():
- # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
- # To prevent errors, skip loading lm_head.weight.
- if name == "lm_head.weight":
- print(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
- continue
+ # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
+ # To prevent errors, skip loading lm_head.weight.
+ if name == "lm_head.weight":
+ logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
+ return []
- old_dtype = data_torch.dtype
+ # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
+ if name.endswith("norm.weight"):
+ data_torch = data_torch + 1
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
- if name.endswith("norm.weight"):
- data_torch = data_torch + 1
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("Starcoder2ForCausalLM")
@@ -2718,35 +2129,43 @@ class MambaModel(Model):
if (self.dir_model / "tokenizer.json").is_file():
self._set_vocab_gpt2()
+ elif (self.dir_model / "tokenizer.model").is_file():
+ self._set_vocab_sentencepiece()
else:
# Use the GPT-NeoX tokenizer when no tokenizer files are present
tokenizer_path = Path(sys.path[0]) / "models" / "ggml-vocab-gpt-neox.gguf"
- print(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
+ logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
neox_reader = gguf.GGUFReader(tokenizer_path, "r")
field = neox_reader.get_field(gguf.Keys.Tokenizer.MODEL)
- self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]))
+ self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8") if field else "gpt2")
field = neox_reader.get_field(gguf.Keys.Tokenizer.PRE)
- self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]))
+ self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else "mpt")
field = neox_reader.get_field(gguf.Keys.Tokenizer.LIST)
+ assert field
self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])
field = neox_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
+ assert field
self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])
field = neox_reader.get_field(gguf.Keys.Tokenizer.MERGES)
+ assert field
self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])
field = neox_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)
- self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
+ self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0] if field else 1)
field = neox_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)
- self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
+ self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0] if field else 0)
field = neox_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)
- self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
+ self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0] if field else 0)
+
+ field = neox_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)
+ self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0] if field else 0)
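The `if field else <default>` guards repeat for every id read from the GPT-NeoX vocab GGUF. One way to fold them into a helper; the helper and its call sites are illustrative, not part of the change:

```python
import gguf


def read_token_id(reader: gguf.GGUFReader, key: str, default: int) -> int:
    field = reader.get_field(key)
    return field.parts[-1].tolist()[0] if field else default


bos_id = read_token_id(neox_reader, gguf.Keys.Tokenizer.BOS_ID, 1)  # neox_reader as above
eos_id = read_token_id(neox_reader, gguf.Keys.Tokenizer.EOS_ID, 0)
```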
def set_gguf_parameters(self):
d_model = self.find_hparam(["hidden_size", "d_model"])
@@ -2775,60 +2194,42 @@ class MambaModel(Model):
self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams["n_layer"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ _tok_embd = None
- tok_embd = None
- tok_embd_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.TOKEN_EMBD] + ".weight"
- output_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.OUTPUT] + ".weight"
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- for name, data_torch in self.get_tensors():
- old_dtype = data_torch.dtype
+ output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
+ tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ new_name = self.map_tensor_name(name)
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
+ if name.endswith(".A_log"):
+ logger.debug("A_log --> A ==> " + new_name)
+ data_torch = -torch.exp(data_torch)
- if name.endswith(".A_log"):
- print("A_log --> A ==> " + new_name)
- data_torch = -torch.exp(data_torch)
+ # assuming token_embd.weight is seen before output.weight
+ if self._tok_embd is not None and new_name == output_name:
+ if torch.equal(self._tok_embd, data_torch):
+ logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
+ return []
+ elif new_name == tok_embd_name:
+ self._tok_embd = data_torch
- # assuming token_embd.weight is seen before output.weight
- if tok_embd is not None and new_name == output_name:
- if torch.equal(tok_embd, data_torch):
- print(f"{output_name} is equivalent to {tok_embd_name}, omitting")
- continue
- if new_name == tok_embd_name:
- tok_embd = data_torch
+ return [(new_name, data_torch)]
- data = data_torch.squeeze().numpy()
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del n_dims # unused
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert big float32 2-dim weight tensors to float16
- new_weight_name = new_name[:-len(".weight")] if new_name.endswith(".weight") else ""
- if self.ftype == 1 and data_dtype == np.float32 and new_weight_name.endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return bid is not None and new_name in (
+ self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [
+ gguf.MODEL_TENSOR.SSM_CONV1D,
+ gguf.MODEL_TENSOR.SSM_X,
+ gguf.MODEL_TENSOR.SSM_DT,
+ gguf.MODEL_TENSOR.SSM_A,
+ gguf.MODEL_TENSOR.SSM_D,
+ ]
+ )
@Model.register("CohereForCausalLM")
@@ -2856,56 +2257,24 @@ class OlmoModel(Model):
def set_gguf_parameters(self):
super().set_gguf_parameters()
self.gguf_writer.add_layer_norm_eps(1e-5)
- if "clip_qkv" in self.hparams is not None:
- self.gguf_writer.add_clamp_kqv(self.hparams["clip_qkv"])
+ clip_qkv = self.hparams.get("clip_qkv")
+ if clip_qkv is not None:
+ self.gguf_writer.add_clamp_kqv(clip_qkv)
# Same as super class, but permuting q_proj, k_proj
# Copied from: LlamaModel
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
+ n_head = self.hparams["num_attention_heads"]
n_kv_head = self.hparams.get("num_key_value_heads")
- for name, data_torch in self.get_tensors():
- old_dtype = data_torch.dtype
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ if name.endswith("q_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_head)
+ if name.endswith("k_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
- data = data_torch.numpy()
-
- if name.endswith("q_proj.weight"):
- data = permute(data, n_head, n_head)
- if name.endswith("k_proj.weight"):
- data = permute(data, n_head, n_kv_head)
-
- data = data.squeeze()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- print(f"Can not map tensor {name!r}")
- sys.exit()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # 1d tensors need to be converted to float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and n_dims == 2:
- data = data.astype(np.float16)
-
- print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("ArcticForCausalLM")
@@ -3104,6 +2473,127 @@ class ArcticModel(Model):
###### CONVERSION LOGIC ######
+# tree of lazy tensors
+class LazyTorchTensor:
+ _meta: Tensor
+ _data: Tensor | None
+ _args: tuple
+ _func: Callable[[tuple], Tensor] | None
+
+ def __init__(self, *, meta: Tensor, data: Tensor | None = None, args: tuple = (), func: Callable[[tuple], Tensor] | None = None):
+ self._meta = meta
+ self._data = data
+ self._args = args
+ self._func = func
+
+ @staticmethod
+ def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
+ # TODO: dict and set
+ if isinstance(o, (list, tuple)):
+ L = []
+ for item in o:
+ L.append(LazyTorchTensor._recurse_apply(item, fn))
+ if isinstance(o, tuple):
+ L = tuple(L)
+ return L
+ elif isinstance(o, LazyTorchTensor):
+ return fn(o)
+ else:
+ return o
+
+ def _wrap_fn(self, fn: Callable, use_self: bool = False) -> Callable[[Any], LazyTorchTensor]:
+ def wrapped_fn(*args, **kwargs):
+ if kwargs is None:
+ kwargs = {}
+ args = ((self,) if use_self else ()) + args
+
+ meta_args = LazyTorchTensor._recurse_apply(args, lambda t: t._meta)
+
+ return LazyTorchTensor(meta=fn(*meta_args, **kwargs), args=args, func=lambda a: fn(*a, **kwargs))
+ return wrapped_fn
+
+ def __getattr__(self, __name: str) -> Any:
+ meta_attr = getattr(self._meta, __name)
+ if callable(meta_attr):
+ return self._wrap_fn(getattr(torch.Tensor, __name), use_self=True)
+ elif isinstance(meta_attr, torch.Tensor):
+ # for things like self.T
+ return self._wrap_fn(lambda s: getattr(s, __name))(self)
+ else:
+ return meta_attr
+
+ _dtype_map: dict[torch.dtype, type] = {
+ torch.float16: np.float16,
+ torch.float32: np.float32,
+ }
+
+ def numpy(self) -> gguf.LazyTensor:
+ dtype = self._dtype_map[self.dtype]
+ return gguf.LazyTensor(lambda: LazyTorchTensor.to_eager(self).numpy(), dtype=dtype, shape=self.shape)
+
+ @overload
+ @staticmethod
+ def to_eager(t: Tensor | LazyTorchTensor) -> Tensor: ...
+
+ @overload
+ @staticmethod
+ def to_eager(t: tuple) -> tuple: ...
+
+ @staticmethod
+ def to_eager(t: Any) -> Any:
+ def simple_to_eager(_t: LazyTorchTensor) -> Tensor:
+ # wake up the lazy tensor
+ if _t._data is None and _t._func is not None:
+ # recurse into its arguments
+ _t._args = LazyTorchTensor.to_eager(_t._args)
+ _t._data = _t._func(_t._args)
+ if _t._data is not None:
+ return _t._data
+ else:
+ raise ValueError(f"Could not compute lazy tensor {_t!r} with args {_t._args!r}")
+
+ # recurse into lists and/or tuples, keeping their structure
+ return LazyTorchTensor._recurse_apply(t, simple_to_eager)
+
+ @staticmethod
+ def from_eager(t: Tensor) -> Tensor:
+ if (t.__class__ == LazyTorchTensor):
+ return t
+ return LazyTorchTensor(meta=t.detach().to("meta"), data=t) # type: ignore
+
+ @classmethod
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
+ del types # unused
+
+ if kwargs is None:
+ kwargs = {}
+
+ if func is torch.Tensor.numpy:
+ return args[0].numpy()
+ if func is torch.equal:
+ eager_args = LazyTorchTensor.to_eager(args)
+ return func(*eager_args, **kwargs)
+
+ return LazyTorchTensor._wrap_fn(args[0], func)(*args, **kwargs)
+
+ # special methods bypass __getattr__, so they need to be added manually
+ # ref: https://docs.python.org/3/reference/datamodel.html#special-lookup
+ # NOTE: LazyTorchTensor can't be a subclass of Tensor (and then be used
+ # as self._meta is currently used), because then the following
+ # operations would by default not be wrapped, and so not propagated
+ # when the tensor is made eager.
+ # It's better to get non-silent errors for not-yet-supported operators.
+ # TODO: add more when needed to avoid clutter, or find a more concise way
+ def __neg__(self, *args): # mamba
+ return self._wrap_fn(torch.Tensor.__neg__)(self, *args)
+
+ def __add__(self, *args): # gemma
+ return self._wrap_fn(torch.Tensor.__add__)(self, *args)
+
+ def __getitem__(self, *args): # bloom falcon refact internlm2
+ return self._wrap_fn(torch.Tensor.__getitem__)(self, *args)
+
+
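A minimal usage sketch for the class above: operations are recorded against meta tensors, so shapes and dtypes resolve immediately, and the recorded graph is only replayed on real data when `to_eager` is called. The values are illustrative:

```python
import torch

t = LazyTorchTensor.from_eager(torch.ones(4, 4))

u = (t + 1).T                    # recorded on the meta tensor; nothing computed yet
assert u.shape == (4, 4)         # metadata is available without touching the data

v = LazyTorchTensor.to_eager(u)  # replays the recorded ops on the real tensor
assert torch.equal(v, torch.full((4, 4), 2.0))
```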
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Convert a huggingface model to a GGML compatible file")
@@ -3113,7 +2603,8 @@ def parse_args() -> argparse.Namespace:
)
parser.add_argument(
"--awq-path", type=Path, default=None,
- help="Path to scale awq cache file")
+ help="Path to scale awq cache file",
+ )
parser.add_argument(
"--outfile", type=Path,
help="path to write to; default: based on input",
@@ -3122,13 +2613,30 @@ def parse_args() -> argparse.Namespace:
"--outtype", type=str, choices=["f32", "f16"], default="f16",
help="output format - use f32 for float32, f16 for float16",
)
- parser.add_argument("--bigendian", action="store_true", help="model is executed on big endian machine")
+ parser.add_argument(
+ "--bigendian", action="store_true",
+ help="model is executed on big endian machine",
+ )
parser.add_argument(
"model", type=Path,
help="directory containing model file",
)
- parser.add_argument("--use-temp-file", action="store_true", help="use the tempfile library while processing (helpful when running out of memory, process killed)")
- parser.add_argument("--model-name", type=str, default=None, help="name of the model")
+ parser.add_argument(
+ "--use-temp-file", action="store_true",
+ help="use the tempfile library while processing (helpful when running out of memory, process killed)",
+ )
+ parser.add_argument(
+ "--no-lazy", action="store_true",
+ help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
+ )
+ parser.add_argument(
+ "--model-name", type=str, default=None,
+ help="name of the model",
+ )
+ parser.add_argument(
+ "--verbose", action="store_true",
+ help="increase output verbosity",
+ )
return parser.parse_args()
@@ -3136,6 +2644,8 @@ def parse_args() -> argparse.Namespace:
def main() -> None:
args = parse_args()
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
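Every script in this change follows the same two-piece logging pattern: a module-level logger named after the script, and a single `basicConfig` call in `main()` whose level is gated by `--verbose`. In minimal form (the logger name varies per script):

```python
import argparse
import logging

logger = logging.getLogger("hf-to-gguf")  # per-script name


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    logger.info("status messages go through the logger instead of print()")
```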
dir_model = args.model
if args.awq_path:
@@ -3144,15 +2654,15 @@ def main() -> None:
tmp_model_path = args.model / "weighted_model"
dir_model = tmp_model_path
if tmp_model_path.is_dir():
- print(f"{tmp_model_path} exists as a weighted model.")
+ logger.info(f"{tmp_model_path} exists as a weighted model.")
else:
tmp_model_path.mkdir(parents=True, exist_ok=True)
- print("Saving new weighted model ...")
+ logger.info("Saving new weighted model ...")
add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
- print(f"Saved weighted model at {tmp_model_path}.")
+ logger.info(f"Saved weighted model at {tmp_model_path}.")
if not dir_model.is_dir():
- print(f'Error: {args.model} is not a directory', file=sys.stderr)
+ logger.error(f'Error: {args.model} is not a directory')
sys.exit(1)
ftype_map = {
@@ -3166,28 +2676,28 @@ def main() -> None:
# output in the same directory as the model by default
fname_out = dir_model / f'ggml-model-{args.outtype}.gguf'
- print(f"Loading model: {dir_model.name}")
+ logger.info(f"Loading model: {dir_model.name}")
hparams = Model.load_hparams(dir_model)
with torch.inference_mode():
model_class = Model.from_model_architecture(hparams["architectures"][0])
- model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file)
+ model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file, args.no_lazy)
- print("Set model parameters")
+ logger.info("Set model parameters")
model_instance.set_gguf_parameters()
- print("Set model tokenizer")
+ logger.info("Set model tokenizer")
model_instance.set_vocab()
if args.vocab_only:
- print(f"Exporting model vocab to '{fname_out}'")
+ logger.info(f"Exporting model vocab to '{fname_out}'")
model_instance.write_vocab()
else:
- print(f"Exporting model to '{fname_out}'")
+ logger.info(f"Exporting model to '{fname_out}'")
model_instance.write()
- print(f"Model successfully exported to '{fname_out}'")
+ logger.info(f"Model successfully exported to '{fname_out}'")
if __name__ == '__main__':
diff --git a/convert-llama-ggml-to-gguf.py b/convert-llama-ggml-to-gguf.py
index 5354b748b..9349de3b3 100755
--- a/convert-llama-ggml-to-gguf.py
+++ b/convert-llama-ggml-to-gguf.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import argparse
import os
import struct
@@ -14,6 +15,8 @@ if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf
+logger = logging.getLogger("ggml-to-gguf")
+
class GGMLFormat(IntEnum):
GGML = 0
@@ -125,7 +128,6 @@ class Tensor:
self.start_offset = offset
self.len_bytes = n_bytes
offset += n_bytes
- # print(n_dims, name_len, dtype, self.dims, self.name, pad)
return offset - orig_offset
@@ -175,7 +177,7 @@ class GGMLModel:
offset += self.validate_header(data, offset)
hp = Hyperparameters()
offset += hp.load(data, offset)
- print(f'* File format: {self.file_format.name}v{self.format_version} with ftype {hp.ftype.name}')
+ logger.info(f'* File format: {self.file_format.name}v{self.format_version} with ftype {hp.ftype.name}')
self.validate_conversion(hp.ftype)
vocab = Vocab(load_scores = self.file_format > GGMLFormat.GGML)
offset += vocab.load(data, offset, hp.n_vocab)
@@ -215,12 +217,12 @@ class GGMLToGGUF:
if float(hp.n_head) / float(x) == gqa:
n_kv_head = x
assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param"
- print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
+ logger.info(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
self.n_kv_head = n_kv_head
self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer)
def save(self):
- print('* Preparing to save GGUF file')
+ logger.info('* Preparing to save GGUF file')
gguf_writer = gguf.GGUFWriter(
self.cfg.output,
gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA],
@@ -230,11 +232,11 @@ class GGMLToGGUF:
if self.special_vocab is not None:
self.special_vocab.add_to_gguf(gguf_writer)
self.add_tensors(gguf_writer)
- print(" gguf: write header")
+ logger.info(" gguf: write header")
gguf_writer.write_header_to_file()
- print(" gguf: write metadata")
+ logger.info(" gguf: write metadata")
gguf_writer.write_kv_data_to_file()
- print(" gguf: write tensors")
+ logger.info(" gguf: write tensors")
gguf_writer.write_tensors_to_file()
gguf_writer.close()
@@ -250,7 +252,7 @@ class GGMLToGGUF:
name = cfg.name if cfg.name is not None else cfg.input.name
except UnicodeDecodeError:
name = None
- print('* Adding model parameters and KV items')
+ logger.info('* Adding model parameters and KV items')
if name is not None:
gguf_writer.add_name(name)
gguf_writer.add_description(desc)
@@ -287,7 +289,7 @@ class GGMLToGGUF:
toktypes = []
if self.vocab_override is not None:
vo = self.vocab_override
- print('* Adding vocab item(s)')
+ logger.info('* Adding vocab item(s)')
for (idx, (vbytes, score, ttype)) in enumerate(vo.all_tokens()):
tokens.append(vbytes)
scores.append(score)
@@ -299,7 +301,7 @@ class GGMLToGGUF:
if len(toktypes) > 0:
gguf_writer.add_token_types(toktypes)
return
- print(f'* Adding {hp.n_vocab} vocab item(s)')
+ logger.info(f'* Adding {hp.n_vocab} vocab item(s)')
assert len(self.model.vocab.items) >= 3, 'Cannot handle unexpectedly short model vocab'
for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items):
tt = 1 # Normal
@@ -334,7 +336,7 @@ class GGMLToGGUF:
def add_tensors(self, gguf_writer):
tensor_map = self.name_map
data = self.data
- print(f'* Adding {len(self.model.tensors)} tensor(s)')
+ logger.info(f'* Adding {len(self.model.tensors)} tensor(s)')
for tensor in self.model.tensors:
name = str(tensor.name, 'UTF-8')
mapped_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
@@ -344,7 +346,6 @@ class GGMLToGGUF:
temp = tempdims[1]
tempdims[1] = tempdims[0]
tempdims[0] = temp
- # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}')
gguf_writer.add_tensor(
mapped_name,
data[tensor.start_offset:tensor.start_offset + tensor.len_bytes],
@@ -401,33 +402,35 @@ def handle_args():
help="directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir")
parser.add_argument("--vocabtype", default="spm,hfft",
help="vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm,hfft)")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
return parser.parse_args()
def main():
cfg = handle_args()
- print(f'* Using config: {cfg}')
- print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n')
+ logging.basicConfig(level=logging.DEBUG if cfg.verbose else logging.INFO)
+ logger.info(f'* Using config: {cfg}')
+ logger.warning('=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===')
if cfg.model_metadata_dir is None and (cfg.gqa == 1 or cfg.eps == '5.0e-06'):
- print('- Note: If converting LLaMA2, specifying "--eps 1e-5" is required. 70B models also need "--gqa 8".')
+ logger.info('- Note: If converting LLaMA2, specifying "--eps 1e-5" is required. 70B models also need "--gqa 8".')
data = np.memmap(cfg.input, mode = 'r')
model = GGMLModel()
- print('* Scanning GGML input file')
+ logger.info('* Scanning GGML input file')
offset = model.load(data, 0) # noqa
- print(f'* GGML model hyperparameters: {model.hyperparameters}')
+ logger.info(f'* GGML model hyperparameters: {model.hyperparameters}')
vocab_override = None
params_override = None
special_vocab = None
if cfg.model_metadata_dir is not None:
(params_override, vocab_override, special_vocab) = handle_metadata(cfg, model.hyperparameters)
- print('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.')
- print(f'* Overriding params: {params_override}')
- print(f'* Overriding vocab: {vocab_override}')
- print(f'* Special vocab: {special_vocab}')
+ logger.info('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.')
+ logger.info(f'* Overriding params: {params_override}')
+ logger.info(f'* Overriding vocab: {vocab_override}')
+ logger.info(f'* Special vocab: {special_vocab}')
else:
- print('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n')
+ logger.warning('=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===')
if model.file_format == GGMLFormat.GGML:
- print('! This is a very old GGML file that does not contain vocab scores. Strongly recommend using model metadata!')
+ logger.info('! This is a very old GGML file that does not contain vocab scores. Strongly recommend using model metadata!')
converter = GGMLToGGUF(
model, data, cfg,
params_override = params_override,
@@ -435,7 +438,7 @@ def main():
special_vocab = special_vocab
)
converter.save()
- print(f'* Successful completion. Output saved to: {cfg.output}')
+ logger.info(f'* Successful completion. Output saved to: {cfg.output}')
if __name__ == '__main__':
diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index 9a9936dec..f09fa85fe 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import json
import os
import struct
@@ -15,6 +16,9 @@ if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
import gguf
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger("lora-to-gguf")
+
NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}
@@ -48,11 +52,9 @@ def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_ty
if __name__ == '__main__':
if len(sys.argv) < 2:
- print(f"Usage: python {sys.argv[0]} [arch]")
- print(
- "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
- )
- print(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
+ logger.info(f"Usage: python {sys.argv[0]} [arch]")
+ logger.info("Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'")
+ logger.info(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
sys.exit(1)
input_json = os.path.join(sys.argv[1], "adapter_config.json")
@@ -70,7 +72,7 @@ if __name__ == '__main__':
arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
if arch_name not in gguf.MODEL_ARCH_NAMES.values():
- print(f"Error: unsupported architecture {arch_name}")
+ logger.error(f"Error: unsupported architecture {arch_name}")
sys.exit(1)
arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)]
@@ -80,21 +82,21 @@ if __name__ == '__main__':
params = json.load(f)
if params["peft_type"] != "LORA":
- print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
+ logger.error(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
sys.exit(1)
if params["fan_in_fan_out"] is True:
- print("Error: param fan_in_fan_out is not supported")
+ logger.error("Error: param fan_in_fan_out is not supported")
sys.exit(1)
if params["bias"] is not None and params["bias"] != "none":
- print("Error: param bias is not supported")
+ logger.error("Error: param bias is not supported")
sys.exit(1)
# TODO: these seem to be layers that have been trained but without lora.
# doesn't seem widely used but eventually should be supported
if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
- print("Error: param modules_to_save is not supported")
+ logger.error("Error: param modules_to_save is not supported")
sys.exit(1)
with open(output_path, "wb") as fout:
@@ -125,13 +127,13 @@ if __name__ == '__main__':
suffix = k[-len(lora_suffixes[0]):]
k = k[: -len(lora_suffixes[0])]
else:
- print(f"Error: unrecognized tensor name {orig_k}")
+ logger.error(f"Error: unrecognized tensor name {orig_k}")
sys.exit(1)
tname = name_map.get_name(k)
if tname is None:
- print(f"Error: could not map tensor name {orig_k}")
- print(" Note: the arch parameter must be specified if the model is not llama")
+ logger.error(f"Error: could not map tensor name {orig_k}")
+ logger.error(" Note: the arch parameter must be specified if the model is not llama")
sys.exit(1)
if suffix == ".lora_A.weight":
@@ -141,8 +143,8 @@ if __name__ == '__main__':
else:
assert False
- print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
+ logger.info(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
write_tensor_header(fout, tname, t.shape, t.dtype)
t.tofile(fout)
- print(f"Converted {input_json} and {input_model} to {output_path}")
+ logger.info(f"Converted {input_json} and {input_model} to {output_path}")
diff --git a/convert-persimmon-to-gguf.py b/convert-persimmon-to-gguf.py
index aba575426..07dcade74 100755
--- a/convert-persimmon-to-gguf.py
+++ b/convert-persimmon-to-gguf.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import argparse
import os
import sys
@@ -14,6 +15,8 @@ if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf
+logger = logging.getLogger("persimmon-to-gguf")
+
def _flatten_dict(dct, tensors, prefix=None):
assert isinstance(dct, dict)
@@ -30,9 +33,9 @@ def _flatten_dict(dct, tensors, prefix=None):
def _get_sentencepiece_tokenizer_info(dir_model: Path):
tokenizer_path = dir_model / 'adept_vocab.model'
- print('gguf: getting sentencepiece tokenizer from', tokenizer_path)
+ logger.info(f'getting sentencepiece tokenizer from {tokenizer_path}')
tokenizer = SentencePieceProcessor(str(tokenizer_path))
- print('gguf: adding tokens')
+ logger.info('adding tokens')
tokens: list[bytes] = []
scores: list[float] = []
toktypes: list[int] = []
@@ -67,8 +70,10 @@ def main():
parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
parser.add_argument("--ckpt-path", type=Path, help="path to persimmon checkpoint .pt file")
parser.add_argument("--model-dir", type=Path, help="directory containing model e.g. 8b_chat_model_release")
- parser.add_argument("--adept-inference-dir", type=str, help="path to adept-inference code directory")
+ parser.add_argument("--adept-inference-dir", type=str, help="path to adept-inference code directory")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
args = parser.parse_args()
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
sys.path.append(str(args.adept_inference_dir))
persimmon_model = torch.load(args.ckpt_path)
hparams = persimmon_model['args']
@@ -107,7 +112,7 @@ def main():
gguf_writer.add_eos_token_id(71013)
tensor_map = gguf.get_tensor_name_map(arch, block_count)
- print(tensor_map)
+ logger.info(tensor_map)
for name in tensors.keys():
data_torch = tensors[name]
if name.endswith(".self_attention.rotary_emb.inv_freq"):
@@ -117,22 +122,21 @@ def main():
data = data_torch.to(torch.float32).squeeze().numpy()
new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
if new_name is None:
- print("Can not map tensor '" + name + "'")
- sys.exit()
+ raise ValueError(f"Can not map tensor '{name}'")
+
n_dims = len(data.shape)
- print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
+ logger.debug(f"{new_name}, n_dims = {str(n_dims)}, {str(old_dtype)} --> {str(data.dtype)}")
gguf_writer.add_tensor(new_name, data)
- print("gguf: write header")
+ logger.info("gguf: write header")
gguf_writer.write_header_to_file()
- print("gguf: write metadata")
+ logger.info("gguf: write metadata")
gguf_writer.write_kv_data_to_file()
- print("gguf: write tensors")
+ logger.info("gguf: write tensors")
gguf_writer.write_tensors_to_file()
gguf_writer.close()
- print(f"gguf: model successfully exported to '{args.outfile}'")
- print("")
+ logger.info(f"gguf: model successfully exported to '{args.outfile}'")
if __name__ == '__main__':
diff --git a/convert.py b/convert.py
index 1c700cf6a..148bfd66a 100755
--- a/convert.py
+++ b/convert.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import argparse
import concurrent.futures
import enum
@@ -35,6 +36,8 @@ import gguf
if TYPE_CHECKING:
from typing_extensions import Self, TypeAlias
+logger = logging.getLogger("convert")
+
if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
faulthandler.register(signal.SIGUSR1)
@@ -281,6 +284,7 @@ class Params:
n_experts = None
n_experts_used = None
f_rope_freq_base = None
+ n_ff = None
# hack to determine LLaMA v1 vs v2 vs CodeLlama
if config.get("moe"):
@@ -305,6 +309,8 @@ class Params:
n_experts_used = config["moe"]["num_experts_per_tok"]
f_rope_freq_base = 1e6
+ assert n_ff is not None
+
return Params(
n_vocab = model["tok_embeddings.weight"].shape[0],
n_embd = config["dim"],
@@ -459,7 +465,8 @@ class SentencePieceVocab(Vocab):
# not found in alternate location either
raise FileNotFoundError('Cannot find tokenizer.model')
- self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+ self.sentencepiece_tokenizer = SentencePieceProcessor()
+ self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
vocab_size = self.sentencepiece_tokenizer.vocab_size()
new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
@@ -479,23 +486,23 @@ class SentencePieceVocab(Vocab):
def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
tokenizer = self.sentencepiece_tokenizer
for i in range(tokenizer.vocab_size()):
- piece = tokenizer.id_to_piece(i)
+ piece = tokenizer.IdToPiece(i)
text = piece.encode("utf-8")
- score: float = tokenizer.get_score(i)
+ score: float = tokenizer.GetScore(i)
toktype = gguf.TokenType.NORMAL
- if tokenizer.is_unknown(i):
+ if tokenizer.IsUnknown(i):
toktype = gguf.TokenType.UNKNOWN
- if tokenizer.is_control(i):
+ if tokenizer.IsControl(i):
toktype = gguf.TokenType.CONTROL
# NOTE: I think added_tokens are user defined.
# ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
# if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED
- if tokenizer.is_unused(i):
+ if tokenizer.IsUnused(i):
toktype = gguf.TokenType.UNUSED
- if tokenizer.is_byte(i):
+ if tokenizer.IsByte(i):
toktype = gguf.TokenType.BYTE
yield text, score, toktype
@@ -643,7 +650,6 @@ class LlamaHfVocab(Vocab):
def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray:
- # print( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_kv_head) )
if n_head_kv is not None and n_head != n_head_kv:
n_head = n_head_kv
return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
@@ -904,7 +910,7 @@ class LazyUnpickler(pickle.Unpickler):
def rebuild_from_type_v2(func, new_type, args, state):
return func(*args)
- CLASSES = {
+ CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = {
# getattr used here as a workaround for mypy not being smart enough to determine
# the staticmethods have a __func__ attribute.
('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
@@ -1033,12 +1039,12 @@ def check_vocab_size(params: Params, vocab: BaseVocab, pad_vocab: bool = False)
# Check for a vocab size mismatch
if params.n_vocab == vocab.vocab_size:
- print("Ignoring added_tokens.json since model matches vocab size without it.")
+ logger.warning("Ignoring added_tokens.json since model matches vocab size without it.")
return
if pad_vocab and params.n_vocab > vocab.vocab_size:
pad_count = params.n_vocab - vocab.vocab_size
- print(
+ logger.debug(
f"Padding vocab with {pad_count} token(s) - through "
)
for i in range(1, pad_count + 1):
@@ -1166,7 +1172,7 @@ class OutputFile:
elapsed = time.time() - start
size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
padi = len(str(len(model)))
- print(
+ logger.info(
f"[{i + 1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}"
)
self.gguf.write_tensor_data(ndarray)
@@ -1281,12 +1287,12 @@ def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) ->
# HF models permut or pack some of the tensors, so we need to undo that
for i in itertools.count():
if f"model.layers.{i}.self_attn.q_proj.weight" in model:
- print(f"Permuting layer {i}")
+ logger.debug(f"Permuting layer {i}")
tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head)
tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv)
# tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
- print(f"Unpacking and permuting layer {i}")
+ logger.debug(f"Unpacking and permuting layer {i}")
tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
@@ -1299,15 +1305,15 @@ def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) ->
tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None)
if name_new is None:
if skip_unknown:
- print(f"Unexpected tensor name: {name} - skipping")
+ logger.warning(f"Unexpected tensor name: {name} - skipping")
continue
raise ValueError(f"Unexpected tensor name: {name}. Use --skip-unknown to ignore it (e.g. LLaVA)")
if tensor_type in should_skip:
- print(f"skipping tensor {name_new}")
+ logger.debug(f"skipping tensor {name_new}")
continue
- print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}")
+ logger.debug(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}")
out[name_new] = lazy_tensor
return out
@@ -1372,7 +1378,7 @@ def load_some_model(path: Path) -> ModelPlus:
paths = find_multifile_paths(path)
models_plus: list[ModelPlus] = []
for path in paths:
- print(f"Loading model file {path}")
+ logger.info(f"Loading model file {path}")
models_plus.append(lazy_load_file(path))
model_plus = merge_multifile_models(models_plus)
@@ -1413,7 +1419,7 @@ class VocabFactory:
else:
raise FileNotFoundError(f"Could not find a tokenizer matching any of {vocab_types}")
- print(f"Loaded vocab file {vocab.fname_tokenizer!r}, type {vocab.name!r}")
+ logger.info(f"Loaded vocab file {vocab.fname_tokenizer!r}, type {vocab.name!r}")
return vocab
def load_vocab(self, vocab_types: list[str] | None, model_parent_path: Path) -> tuple[BaseVocab, gguf.SpecialVocab]:
@@ -1438,19 +1444,19 @@ def default_outfile(model_paths: list[Path], file_type: GGMLFileType) -> Path:
}[file_type]
ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf"
if ret in model_paths:
- sys.stderr.write(
+ logger.error(
f"Error: Default output path ({ret}) would overwrite the input. "
- "Please explicitly specify a path using --outfile.\n")
+ "Please explicitly specify a path using --outfile.")
sys.exit(1)
return ret
def do_dump_model(model_plus: ModelPlus) -> None:
- print(f"model_plus.paths = {model_plus.paths!r}")
- print(f"model_plus.format = {model_plus.format!r}")
- print(f"model_plus.vocab = {model_plus.vocab!r}")
+ print(f"model_plus.paths = {model_plus.paths!r}") # noqa: NP100
+ print(f"model_plus.format = {model_plus.format!r}") # noqa: NP100
+ print(f"model_plus.vocab = {model_plus.vocab!r}") # noqa: NP100
for name, lazy_tensor in model_plus.model.items():
- print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")
+ print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}") # noqa: NP100
def main(args_in: list[str] | None = None) -> None:
@@ -1473,8 +1479,18 @@ def main(args_in: list[str] | None = None) -> None:
parser.add_argument("--big-endian", action="store_true", help="model is executed on big endian machine")
parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides")
parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
args = parser.parse_args(args_in)
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ elif args.dump_single or args.dump:
+ # Avoid printing anything besides the dump output
+ logging.basicConfig(level=logging.WARNING)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
if args.no_vocab and args.vocab_only:
raise ValueError("--vocab-only does not make sense with --no-vocab")
@@ -1491,29 +1507,32 @@ def main(args_in: list[str] | None = None) -> None:
if args.dump:
do_dump_model(model_plus)
return
+
endianess = gguf.GGUFEndian.LITTLE
if args.big_endian:
endianess = gguf.GGUFEndian.BIG
- params = Params.load(model_plus)
- if params.n_ctx == -1:
- if args.ctx is None:
- msg = """\
- The model doesn't have a context size, and you didn't specify one with --ctx
- Please specify one with --ctx:
- - LLaMA v1: --ctx 2048
- - LLaMA v2: --ctx 4096"""
- parser.error(textwrap.dedent(msg))
- params.n_ctx = args.ctx
+ params = None
+ if args.pad_vocab or not args.vocab_only:
+ params = Params.load(model_plus)
+ if params.n_ctx == -1:
+ if args.ctx is None:
+ msg = """\
+ The model doesn't have a context size, and you didn't specify one with --ctx
+ Please specify one with --ctx:
+ - LLaMA v1: --ctx 2048
+ - LLaMA v2: --ctx 4096"""
+ parser.error(textwrap.dedent(msg))
+ params.n_ctx = args.ctx
- if args.outtype:
- params.ftype = {
- "f32": GGMLFileType.AllF32,
- "f16": GGMLFileType.MostlyF16,
- "q8_0": GGMLFileType.MostlyQ8_0,
- }[args.outtype]
+ if args.outtype:
+ params.ftype = {
+ "f32": GGMLFileType.AllF32,
+ "f16": GGMLFileType.MostlyF16,
+ "q8_0": GGMLFileType.MostlyQ8_0,
+ }[args.outtype]
- print(f"params = {params}")
+ logger.info(f"params = {params}")
model_parent_path = model_plus.paths[0].parent
vocab_path = Path(args.vocab_dir or args.model or model_parent_path)
@@ -1526,17 +1545,27 @@ def main(args_in: list[str] | None = None) -> None:
if not args.outfile:
raise ValueError("need --outfile if using --vocab-only")
outfile = args.outfile
+ if params is None:
+ params = Params(
+ n_vocab = vocab.vocab_size,
+ n_embd = 1,
+ n_layer = 1,
+ n_ctx = 1,
+ n_ff = 1,
+ n_head = 1,
+ n_head_kv = 1,
+ f_norm_eps = 1e-5,
+ )
OutputFile.write_vocab_only(outfile, params, vocab, special_vocab,
endianess=endianess, pad_vocab=args.pad_vocab)
- print(f"Wrote {outfile}")
+ logger.info(f"Wrote {outfile}")
return
if model_plus.vocab is not None and args.vocab_dir is None and not args.no_vocab:
vocab = model_plus.vocab
- print(f"Vocab info: {vocab}")
- print(f"Special vocab info: {special_vocab}")
-
+ logger.info(f"Vocab info: {vocab}")
+ logger.info(f"Special vocab info: {special_vocab}")
model = model_plus.model
model = convert_model_names(model, params, args.skip_unknown)
ftype = pick_output_type(model, args.outtype)
@@ -1544,11 +1573,11 @@ def main(args_in: list[str] | None = None) -> None:
outfile = args.outfile or default_outfile(model_plus.paths, ftype)
params.ftype = ftype
- print(f"Writing {outfile}, format {ftype}")
+ logger.info(f"Writing {outfile}, format {ftype}")
OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab,
concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab)
- print(f"Wrote {outfile}")
+ logger.info(f"Wrote {outfile}")
if __name__ == '__main__':
diff --git a/docs/BLIS.md b/docs/BLIS.md
index 0bcd6eeef..c933766b7 100644
--- a/docs/BLIS.md
+++ b/docs/BLIS.md
@@ -23,7 +23,7 @@ Install BLIS:
sudo make install
```
-We recommend using openmp since it's easier to modify the cores been used.
+We recommend using openmp since it's easier to modify the cores being used.
### llama.cpp compilation
diff --git a/docs/HOWTO-add-model.md b/docs/HOWTO-add-model.md
index a56b78344..48769cdf6 100644
--- a/docs/HOWTO-add-model.md
+++ b/docs/HOWTO-add-model.md
@@ -96,9 +96,9 @@ NOTE: The dimensions in `ggml` are typically in the reverse order of the `pytorc
This is the funniest part, you have to provide the inference graph implementation of the new model architecture in `llama_build_graph`.
-Have a look to existing implementation like `build_llama`, `build_dbrx` or `build_bert`.
+Have a look at existing implementations like `build_llama`, `build_dbrx` or `build_bert`.
-When implementing a new graph, please note that the underlying `ggml` backends might not support them all, support of missing backend operations can be added in another PR.
+When implementing a new graph, please note that the underlying `ggml` backends might not support them all; support for missing backend operations can be added in another PR.
Note: to debug the inference graph: you can use [eval-callback](../examples/eval-callback).
diff --git a/examples/convert-llama2c-to-ggml/README.md b/examples/convert-llama2c-to-ggml/README.md
index 6da2d7e18..742dcf7a3 100644
--- a/examples/convert-llama2c-to-ggml/README.md
+++ b/examples/convert-llama2c-to-ggml/README.md
@@ -2,7 +2,7 @@
This example reads weights from project [llama2.c](https://github.com/karpathy/llama2.c) and saves them in ggml compatible format. The vocab that is available in `models/ggml-vocab.bin` is used by default.
-To convert the model first download the models from the [llma2.c](https://github.com/karpathy/llama2.c) repository:
+To convert the model first download the models from the [llama2.c](https://github.com/karpathy/llama2.c) repository:
`$ make -j`
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 3da5317b3..22743b1bf 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -575,7 +575,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
GGML_ASSERT(tokens_input->type == GGML_TYPE_I32);
auto add_to_f32 = [] (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
- if (ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16) {
+ if (ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16 || a->type == GGML_TYPE_BF16) {
return ggml_add_cast(ctx, a, b, GGML_TYPE_F32);
} else if (a->type == GGML_TYPE_F32) {
return ggml_add(ctx, a, b);
diff --git a/examples/gguf-split/gguf-split.cpp b/examples/gguf-split/gguf-split.cpp
index 39c75e0a7..e04feeae3 100644
--- a/examples/gguf-split/gguf-split.cpp
+++ b/examples/gguf-split/gguf-split.cpp
@@ -32,6 +32,7 @@ struct split_params {
int n_split_tensors = 128;
std::string input;
std::string output;
+ bool no_tensor_first_split = false;
bool dry_run = false;
};
@@ -49,6 +50,7 @@ static void split_print_usage(const char * executable) {
printf(" --merge merge multiple GGUF to a single GGUF\n");
printf(" --split-max-tensors max tensors in each split (default: %d)\n", default_params.n_split_tensors);
printf(" --split-max-size N(M|G) max size per split\n");
+ printf(" --no-tensor-first-split do not add tensors to the first split (disabled by default)\n");
printf(" --dry-run only print out a split plan and exit, without writing any new files\n");
printf("\n");
}
@@ -100,6 +102,10 @@ static void split_params_parse_ex(int argc, const char ** argv, split_params & p
arg_found = true;
params.dry_run = true;
}
+ if (arg == "--no-tensor-first-split") {
+ arg_found = true;
+ params.no_tensor_first_split = true;
+ }
if (is_op_set) {
throw std::invalid_argument("error: either --split or --merge can be specified, but not both");
@@ -200,10 +206,10 @@ struct split_strategy {
// because we need to know list of tensors for each file in advance, we will build all the ctx_out for all output splits
int i_split = -1;
struct gguf_context * ctx_out = NULL;
- auto new_ctx_out = [&]() {
+ auto new_ctx_out = [&](bool allow_no_tensors) {
i_split++;
if (ctx_out != NULL) {
- if (gguf_get_n_tensors(ctx_out) == 0) {
+ if (gguf_get_n_tensors(ctx_out) == 0 && !allow_no_tensors) {
fprintf(stderr, "error: one of splits have 0 tensors. Maybe size or tensors limit is too small\n");
exit(EXIT_FAILURE);
}
@@ -220,7 +226,12 @@ struct split_strategy {
};
// initialize ctx_out for the first split
- new_ctx_out();
+ new_ctx_out(false);
+
+ // skip first split if no_tensor_first_split is set
+ if (params.no_tensor_first_split) {
+ new_ctx_out(true);
+ }
// process tensors one by one
size_t curr_tensors_size = 0; // current size by counting only tensors size (without metadata)
@@ -230,7 +241,7 @@ struct split_strategy {
size_t n_bytes = GGML_PAD(ggml_nbytes(t), GGUF_DEFAULT_ALIGNMENT);
size_t next_tensors_size = curr_tensors_size + n_bytes;
if (should_split(i, next_tensors_size)) {
- new_ctx_out();
+ new_ctx_out(false);
curr_tensors_size = n_bytes;
} else {
curr_tensors_size = next_tensors_size;
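With `--no-tensor-first-split`, the first output file carries only GGUF metadata, so the same model yields one extra shard (hence the test update below moving from `00006` to `00007`). A minimal Python sketch of the planning loop above, modelling only the tensor-count limit for brevity; `plan_splits` is an invented name, not part of the tool:

```python
# Sketch of the split planner above; models only --split-max-tensors,
# not the size-based limit.
def plan_splits(n_tensors: int, max_tensors: int, no_tensor_first_split: bool = False) -> list[list[int]]:
    splits: list[list[int]] = [[]]
    if no_tensor_first_split:
        splits.append([])  # keep split 0 metadata-only, like new_ctx_out(true)
    for i in range(n_tensors):
        if len(splits[-1]) >= max_tensors:
            splits.append([])  # start a new shard, like new_ctx_out(false)
        splits[-1].append(i)
    return splits

print(len(plan_splits(160, 32)))                              # 5 shards
print(len(plan_splits(160, 32, no_tensor_first_split=True)))  # 6 shards: the first is empty
```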
diff --git a/examples/gguf-split/tests.sh b/examples/gguf-split/tests.sh
index 57588204d..7ca6fa7f2 100755
--- a/examples/gguf-split/tests.sh
+++ b/examples/gguf-split/tests.sh
@@ -55,15 +55,15 @@ $MAIN --model $WORK_PATH/ggml-model-merge.gguf --random-prompt --n-predict 32
echo PASS
echo
-# 4. Split with no tensor in metadata
-#$SPLIT --split-max-tensors 32 --no-tensor-in-metadata $WORK_PATH/ggml-model-merge.gguf $WORK_PATH/ggml-model-split-32-tensors
-#echo PASS
-#echo
+# 4. Split with no tensors in the first split
+$SPLIT --split-max-tensors 32 --no-tensor-first-split $WORK_PATH/ggml-model-merge.gguf $WORK_PATH/ggml-model-split-32-tensors
+echo PASS
+echo
# 4b. Test the sharded model is loading properly
-#$MAIN --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00006.gguf --random-prompt --n-predict 32
-#echo PASS
-#echo
+$MAIN --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00007.gguf --random-prompt --n-predict 32
+echo PASS
+echo
# 5. Merge
#$SPLIT --merge $WORK_PATH/ggml-model-split-32-tensors-00001-of-00006.gguf $WORK_PATH/ggml-model-merge-2.gguf
diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp
index 71e7a727f..82b19fc4f 100644
--- a/examples/imatrix/imatrix.cpp
+++ b/examples/imatrix/imatrix.cpp
@@ -19,6 +19,7 @@
struct Stats {
std::vector<float> values;
+ std::vector<int> counts;
int ncall = 0;
};
@@ -121,12 +122,10 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
auto & e = m_stats[wname];
++e.ncall;
- // NOTE: since we select top-k experts, the number of calls for the expert tensors will be k times larger
- // using the following line, we can correct for that if needed by replacing the line above with:
- //if (idx == t->src[0]->ne[0] - 1) ++e.ncall;
if (e.values.empty()) {
e.values.resize(src1->ne[0]*n_as, 0);
+ e.counts.resize(src1->ne[0]*n_as, 0);
}
else if (e.values.size() != (size_t)src1->ne[0]*n_as) {
fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]*n_as);
@@ -153,6 +152,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
for (int j = 0; j < (int)src1->ne[0]; ++j) {
e.values[e_start + j] += x[j]*x[j];
+ e.counts[e_start + j]++;
}
}
}
@@ -170,6 +170,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
auto& e = m_stats[wname];
if (e.values.empty()) {
e.values.resize(src1->ne[0], 0);
+ e.counts.resize(src1->ne[0], 0);
}
else if (e.values.size() != (size_t)src1->ne[0]) {
fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
@@ -183,6 +184,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
const float * x = data + row * src1->ne[0];
for (int j = 0; j < (int)src1->ne[0]; ++j) {
e.values[j] += x[j]*x[j];
+ e.counts[j]++;
}
}
if (e.ncall > m_last_call) {
@@ -222,7 +224,13 @@ void IMatrixCollector::save_imatrix(const char * fname, const char * dataset) co
out.write((const char *) &p.second.ncall, sizeof(p.second.ncall));
int nval = p.second.values.size();
out.write((const char *) &nval, sizeof(nval));
- if (nval > 0) out.write((const char *) p.second.values.data(), nval * sizeof(float));
+ if (nval > 0) {
+ std::vector<float> tmp(nval);
+ for (int i = 0; i < nval; i++) {
+ tmp[i] = (p.second.values[i] / static_cast<float>(p.second.counts[i])) * static_cast<float>(p.second.ncall);
+ }
+ out.write((const char*)tmp.data(), nval*sizeof(float));
+ }
}
// Write the number of call the matrix was computed with
@@ -270,14 +278,28 @@ bool IMatrixCollector::load_imatrix(const char * imatrix_file, std::unordered_ma
imatrix_data = {};
return false;
}
- e.values.resize(nval);
- in.read((char*)e.values.data(), nval*sizeof(float));
+
+ // When re-called from load_imatrix() with add set, this will already be created.
+ if (e.values.empty()) {
+ e.values.resize(nval, 0);
+ e.counts.resize(nval, 0);
+ }
+
+ std::vector<float> tmp(nval);
+ in.read((char*)tmp.data(), nval*sizeof(float));
if (in.fail()) {
printf("%s: failed reading data for entry %d\n",__func__,i);
imatrix_data = {};
return false;
}
- e.ncall = ncall;
+
+ // Recreate the state as expected by save_imatrix(), and correct for the weighted sum.
+ for (int i = 0; i < nval; i++) {
+ e.values[i] += tmp[i];
+ e.counts[i] += ncall;
+ }
+ e.ncall += ncall;
+
}
return true;
}
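The arithmetic behind these imatrix hunks, restated as a hedged Python sketch (function names are illustrative): each element accumulates a sum of squared activations in `values` and an observation count in `counts`; saving writes the per-element mean scaled by `ncall`, and loading folds a saved file back into sums and counts, so files computed from different numbers of calls combine as a correctly weighted sum.

```python
# Illustrative restatement of the accumulate/save/load-merge cycle above.
def accumulate(stats: dict, x: list[float]) -> None:
    if not stats["values"]:
        stats["values"] = [0.0] * len(x)
        stats["counts"] = [0] * len(x)
    stats["ncall"] += 1
    for j, v in enumerate(x):
        stats["values"][j] += v * v   # sum of squared activations
        stats["counts"][j] += 1       # per-element observation count

def saved_values(stats: dict) -> list[float]:
    # What save_imatrix() writes: the per-element mean, rescaled by ncall.
    return [(v / c) * stats["ncall"] for v, c in zip(stats["values"], stats["counts"])]

def load_merge(stats: dict, saved: list[float], ncall: int) -> None:
    # What load_imatrix() does: recreate sums and fold them into existing state.
    if not stats["values"]:
        stats["values"] = [0.0] * len(saved)
        stats["counts"] = [0] * len(saved)
    for j, v in enumerate(saved):
        stats["values"][j] += v
        stats["counts"][j] += ncall
    stats["ncall"] += ncall
```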
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 95c3095dd..40128ec44 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -178,6 +178,7 @@ struct cmd_params {
std::vector<std::vector<float>> tensor_split;
std::vector<bool> use_mmap;
std::vector<bool> embeddings;
+ ggml_numa_strategy numa;
int reps;
bool verbose;
output_formats output_format;
@@ -200,6 +201,7 @@ static const cmd_params cmd_params_defaults = {
/* tensor_split */ {std::vector<float>(llama_max_devices(), 0.0f)},
/* use_mmap */ {true},
/* embeddings */ {false},
+ /* numa */ GGML_NUMA_STRATEGY_DISABLED,
/* reps */ 5,
/* verbose */ false,
/* output_format */ MARKDOWN
@@ -224,6 +226,7 @@ static void print_usage(int /* argc */, char ** argv) {
printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
printf(" -fa, --flash-attn <0|1> (default: %s)\n", join(cmd_params_defaults.flash_attn, ",").c_str());
printf(" -mmp, --mmap <0|1> (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str());
+ printf(" --numa (default: disabled)\n");
printf(" -embd, --embeddings <0|1> (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str());
printf(" -ts, --tensor-split (default: 0)\n");
printf(" -r, --repetitions (default: %d)\n", cmd_params_defaults.reps);
@@ -396,6 +399,17 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
}
auto p = split(argv[i], split_delim);
params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
+ } else if (arg == "--numa") {
+ if (++i >= argc) {
+ invalid_param = true;
+ break;
+ } else {
+ std::string value(argv[i]);
+ /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
+ else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
+ else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
+ else { invalid_param = true; break; }
+ }
} else if (arg == "-fa" || arg == "--flash-attn") {
if (++i >= argc) {
invalid_param = true;
@@ -1215,6 +1229,7 @@ int main(int argc, char ** argv) {
llama_log_set(llama_null_log_callback, NULL);
}
llama_backend_init();
+ llama_numa_init(params.numa);
// initialize printer
std::unique_ptr<printer> p;
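The accepted `--numa` values map onto NUMA strategies as follows; a hedged Python restatement of the parser branch above, with enum values assumed to mirror `ggml_numa_strategy` (note that an empty value falls back to `distribute`):

```python
from enum import Enum

class NumaStrategy(Enum):  # assumed to mirror the ggml_numa_strategy C enum
    DISABLED = 0
    DISTRIBUTE = 1
    ISOLATE = 2
    NUMACTL = 3

def parse_numa(value: str) -> NumaStrategy:
    """Same mapping as the --numa branch above; raises on unknown values."""
    if value in ("distribute", ""):
        return NumaStrategy.DISTRIBUTE
    if value == "isolate":
        return NumaStrategy.ISOLATE
    if value == "numactl":
        return NumaStrategy.NUMACTL
    raise ValueError(f"invalid --numa value: {value!r}")
```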
diff --git a/examples/llava/README.md b/examples/llava/README.md
index d4810d42e..4fb0cf381 100644
--- a/examples/llava/README.md
+++ b/examples/llava/README.md
@@ -56,7 +56,7 @@ python ./examples/llava/convert-image-encoder-to-gguf.py -m ../clip-vit-large-pa
python ./convert.py ../llava-v1.5-7b --skip-unknown
```
-Now both the LLaMA part and the image encoder is in the `llava-v1.5-7b` directory.
+Now both the LLaMA part and the image encoder are in the `llava-v1.5-7b` directory.
## LLaVA 1.6 gguf conversion
1) First clone a LLaVA 1.6 model:
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index e3c9bcd43..f568f470c 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -104,7 +104,6 @@ static std::string format(const char * fmt, ...) {
#define TN_POS_EMBD "%s.position_embd.weight"
#define TN_CLASS_EMBD "v.class_embd"
#define TN_PATCH_EMBD "v.patch_embd.weight"
-#define TN_PATCH_BIAS "v.patch_embd.bias"
#define TN_ATTN_K "%s.blk.%d.attn_k.%s"
#define TN_ATTN_Q "%s.blk.%d.attn_q.%s"
#define TN_ATTN_V "%s.blk.%d.attn_v.%s"
@@ -426,7 +425,6 @@ struct clip_vision_model {
// embeddings
struct ggml_tensor * class_embedding;
struct ggml_tensor * patch_embeddings;
- struct ggml_tensor * patch_bias;
struct ggml_tensor * position_embeddings;
struct ggml_tensor * pre_ln_w;
@@ -503,11 +501,6 @@ struct clip_ctx {
bool use_gelu = false;
int32_t ftype = 1;
- bool has_class_embedding = true;
- bool has_pre_norm = true;
- bool has_post_norm = false;
- bool has_patch_bias = false;
-
struct gguf_context * ctx_gguf;
struct ggml_context * ctx_data;
@@ -533,7 +526,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
const int patch_size = hparams.patch_size;
const int num_patches = ((image_size / patch_size) * (image_size / patch_size));
const int num_patches_per_side = image_size / patch_size; GGML_UNUSED(num_patches_per_side);
- const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
+ const int num_positions = num_patches + 1;
const int hidden_size = hparams.hidden_size;
const int n_head = hparams.n_head;
const int d_head = hidden_size / n_head;
@@ -564,23 +557,16 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));
- if (ctx->has_patch_bias) {
- // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
- inp = ggml_add(ctx0, inp, model.patch_bias);
- }
-
// concat class_embeddings and patch_embeddings
- struct ggml_tensor * embeddings = inp;
- if (ctx->has_class_embedding) {
- embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
- embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
- embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
- embeddings = ggml_acc(ctx0, embeddings, inp,
- embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
- }
+ struct ggml_tensor * embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
ggml_set_name(embeddings, "embeddings");
ggml_set_input(embeddings);
+ embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
+ embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
+
+ embeddings = ggml_acc(ctx0, embeddings, inp,
+ embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
ggml_set_name(positions, "positions");
@@ -590,7 +576,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));
// pre-layernorm
- if (ctx->has_pre_norm) {
+ {
embeddings = ggml_norm(ctx0, embeddings, eps);
ggml_set_name(embeddings, "pre_ln");
@@ -678,14 +664,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
embeddings = cur;
}
- // post-layernorm
- if (ctx->has_post_norm) {
- embeddings = ggml_norm(ctx0, embeddings, eps);
- ggml_set_name(embeddings, "post_ln");
-
- embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
- }
-
// llava projector
{
embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
@@ -1170,39 +1148,12 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
}
- try {
- vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
- new_clip->has_class_embedding = true;
- } catch (const std::exception& e) {
- new_clip->has_class_embedding = false;
- }
-
- try {
- vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
- vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
- new_clip->has_pre_norm = true;
- } catch (std::exception & e) {
- new_clip->has_pre_norm = false;
- }
-
- try {
- vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
- vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
- new_clip->has_post_norm = true;
- } catch (std::exception & e) {
- new_clip->has_post_norm = false;
- }
-
- try {
- vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
- new_clip->has_patch_bias = true;
- } catch (std::exception & e) {
- new_clip->has_patch_bias = false;
- }
-
try {
vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
+ vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
+ vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
+ vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
} catch(const std::exception& e) {
LOG_TEE("%s: failed to load vision model tensors\n", __func__);
}
diff --git a/examples/main/README.md b/examples/main/README.md
index e7a38743c..97e2ae4c2 100644
--- a/examples/main/README.md
+++ b/examples/main/README.md
@@ -143,7 +143,7 @@ The `--ctx-size` option allows you to set the size of the prompt context used by
### Extended Context Size
-Some fine-tuned models have extended the context length by scaling RoPE. For example, if the original pre-trained model have a context length (max sequence length) of 4096 (4k) and the fine-tuned model have 32k. That is a scaling factor of 8, and should work by setting the above `--ctx-size` to 32768 (32k) and `--rope-scale` to 8.
+Some fine-tuned models have extended the context length by scaling RoPE. For example, if the original pre-trained model has a context length (max sequence length) of 4096 (4k) and the fine-tuned model has 32k, that is a scaling factor of 8, which should work by setting the above `--ctx-size` to 32768 (32k) and `--rope-scale` to 8.
- `--rope-scale N`: Where N is the linear scaling factor used by the fine-tuned model.
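The scaling factor is simply the ratio of the fine-tuned context length to the original one; a quick check:

```python
# Linear RoPE scaling factor = fine-tuned context / original context.
base_ctx, tuned_ctx = 4096, 32768
rope_scale = tuned_ctx / base_ctx
print(rope_scale)  # 8.0 -> pass --ctx-size 32768 --rope-scale 8
```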
@@ -286,7 +286,7 @@ These options help improve the performance and memory usage of the LLaMA models.
- `--numa distribute`: Pin an equal proportion of the threads to the cores on each NUMA node. This will spread the load amongst all cores on the system, utilizing all memory channels at the expense of potentially requiring memory to travel over the slow links between nodes.
- `--numa isolate`: Pin all threads to the NUMA node that the program starts on. This limits the number of cores and amount of memory that can be used, but guarantees all memory access remains local to the NUMA node.
-- `--numa numactl`: Pin threads to the CPUMAP that is passed to the program by starting it with the numactl utility. This is the most flexible mode, and allow arbitraty core usage patterns, for example a map that uses all the cores on one NUMA nodes, and just enough cores on a second node to saturate the inter-node memory bus.
+- `--numa numactl`: Pin threads to the CPUMAP that is passed to the program by starting it with the numactl utility. This is the most flexible mode, and allows arbitrary core usage patterns, for example a map that uses all the cores on one NUMA node, and just enough cores on a second node to saturate the inter-node memory bus.
These flags attempt optimizations that help on some systems with non-uniform memory access. This currently consists of one of the above strategies, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 5c693657c..49acd6bab 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -362,6 +362,9 @@ int main(int argc, char ** argv) {
params.interactive_first = true;
params.antiprompt.emplace_back("<|im_start|>user\n");
}
+ else if (params.conversation) {
+ params.interactive_first = true;
+ }
// enable interactive mode if interactive start is specified
if (params.interactive_first) {
@@ -544,7 +547,7 @@ int main(int argc, char ** argv) {
// if we run out of context:
// - take the n_keep first tokens from the original prompt (via n_past)
// - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
- if (n_past + (int) embd.size() + std::max(0, guidance_offset) > n_ctx) {
+ if (n_past + (int) embd.size() + std::max(0, guidance_offset) >= n_ctx) {
if (params.n_predict == -2) {
LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
break;
@@ -733,7 +736,7 @@ int main(int argc, char ** argv) {
// display text
if (input_echo && display) {
for (auto id : embd) {
- const std::string token_str = llama_token_to_piece(ctx, id);
+ const std::string token_str = llama_token_to_piece(ctx, id, !params.conversation);
printf("%s", token_str.c_str());
if (embd.size() > 1) {
@@ -796,7 +799,7 @@ int main(int argc, char ** argv) {
// deal with end of generation tokens in interactive mode
if (llama_token_is_eog(model, llama_sampling_last(ctx_sampling))) {
- LOG("found EOS token\n");
+ LOG("found an EOG token\n");
if (params.interactive) {
if (!params.antiprompt.empty()) {
@@ -816,7 +819,7 @@ int main(int argc, char ** argv) {
if (n_past > 0 && is_interacting) {
LOG("waiting for user input\n");
- if (params.instruct || params.chatml) {
+ if (params.conversation || params.instruct || params.chatml) {
printf("\n> ");
}
@@ -826,7 +829,7 @@ int main(int argc, char ** argv) {
}
std::string buffer;
- if (!params.input_prefix.empty()) {
+ if (!params.input_prefix.empty() && !params.conversation) {
LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
printf("%s", params.input_prefix.c_str());
}
@@ -850,7 +853,7 @@ int main(int argc, char ** argv) {
// Entering a empty line lets the user pass control back
if (buffer.length() > 1) {
// append input suffix if any
- if (!params.input_suffix.empty()) {
+ if (!params.input_suffix.empty() && !params.conversation) {
LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
printf("%s", params.input_suffix.c_str());
}
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 432cc2b4f..909eab283 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -46,7 +46,8 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", },
{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0008 ppl @ LLaMA-v1-7B", },
{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
- { "F16", LLAMA_FTYPE_MOSTLY_F16, "13.00G @ 7B", },
+ { "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, -0.0020 ppl @ Mistral-7B", },
+ { "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
{ "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
// Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
diff --git a/examples/server/README.md b/examples/server/README.md
index b96a4444a..650317991 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -62,6 +62,18 @@ page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/
- `--chat-template JINJA_TEMPLATE`: Set custom jinja chat template. This parameter accepts a string, not a file name. Default: template taken from model's metadata. We only support [some pre-defined templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)
- `--log-disable`: Output logs to stdout only, not to `llama.log`. Default: enabled
- `--log-format FORMAT`: Define the log output to FORMAT: json or text Default: `json`
+- `--rope-scaling` : RoPE scaling method. Defaults to linear unless otherwise specified by the model. Options are `none`, `linear`, `yarn`
+- `--rope-freq-base N` : RoPE frequency base (default: loaded from model)
+- `--rope-freq-scale N`: RoPE frequency scaling factor, expands context by a factor of 1/N (e.g. 0.25)
+- `--yarn-ext-factor N` : YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)
+- `--yarn-attn-factor N` : YaRN: scale sqrt(t) or attention magnitude (default: 1.0)
+- `--yarn-beta-slow N`: YaRN: high correction dim or alpha (default: 1.0)
+- `--yarn-beta-fast N`: YaRN: low correction dim or beta (default: 32.0)
+- `--pooling` : Pooling type for embeddings, use model default if unspecified. Options are `none`, `mean`, `cls`
+- `-dt N`, `--defrag-thold N`: KV cache defragmentation threshold (default: -1.0, < 0 = disabled)
+- `-fa`, `--flash-attn` : enable flash attention (default: disabled).
+- `-ctk TYPE`, `--cache-type-k TYPE` : KV cache data type for K (default: `f16`, options `f32`, `f16`, `q8_0`, `q4_0`, `q4_1`, `iq4_nl`, `q5_0`, or `q5_1`)
+- `-ctv TYPE`, `--cache-type-v TYPE` : KV cache type for V (default `f16`, see `-ctk` for options)
**If compiled with `LLAMA_SERVER_SSL=ON`**
- `--ssl-key-file FNAME`: path to file a PEM-encoded SSL private key
@@ -260,7 +272,7 @@ node index.js
`logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced. The tokens can also be represented as strings, e.g. `[["Hello, World!",-0.5]]` will reduce the likelihood of all the individual tokens that represent the string `Hello, World!`, just like the `presence_penalty` does. Default: `[]`
- `n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token. Default: `0`
+ `n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token given the sampling settings. Note that for temperature < 0 the tokens are sampled greedily but token probabilities are still being calculated via a simple softmax of the logits without considering any other sampler settings. Default: `0`
`min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0`
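A hedged example of exercising `n_probs` against a locally running server; the URL is illustrative, and the response field names follow the `completion_probabilities` schema from the server README (treat them as assumptions if the API drifts):

```python
import json
from urllib.request import Request, urlopen

# Ask a local llama.cpp server for top-5 token probabilities per generated token.
payload = {
    "prompt": "Building a website can be done in 10 simple steps:",
    "n_predict": 8,
    "n_probs": 5,       # top-N probabilities per token, as documented above
    "temperature": 0.8,
    "seed": 42,
}
req = Request("http://127.0.0.1:8080/completion",
              data=json.dumps(payload).encode("utf-8"),
              headers={"Content-Type": "application/json"})
with urlopen(req) as resp:
    body = json.load(resp)
for item in body.get("completion_probabilities", []):
    print(item["content"], [(p["tok_str"], p["prob"]) for p in item["probs"]])
```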
@@ -319,7 +331,7 @@ Notice that each `probs` is an array of length `n_probs`.
`content`: Set the text to tokenize.
- Note that a special `BOS` token is never inserted.
+ `add_special`: Boolean indicating if special tokens, i.e. `BOS`, should be inserted. Default: `false`
- **POST** `/detokenize`: Convert tokens to text.
diff --git a/examples/server/public/favicon.ico b/examples/server/public/favicon.ico
new file mode 100644
index 000000000..89e154a0a
Binary files /dev/null and b/examples/server/public/favicon.ico differ
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index f60530cf3..305f79492 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -12,6 +12,8 @@
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
// auto generated files (update with ./deps.sh)
@@ -859,7 +861,7 @@ struct server_context {
slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
// process "json_schema" and "grammar"
- if (data.contains("json_schema") && !data["json_schema"].is_null() && data.contains("grammar") && !data["grammar"].is_null()) {
+ if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST);
return false;
} else if (data.contains("json_schema") && !data.contains("grammar")) {
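The pervasive `data["key"]` → `data.at("key")` changes in this file swap nlohmann::json's non-const `operator[]`, which silently inserts a null value for a missing key, for checked access that throws (and, with the `JSON_ASSERT` override above, fails through `GGML_ASSERT`). The same distinction in Python terms, for intuition:

```python
# dict[key] raises KeyError on a missing key, like json::at();
# dict.setdefault(key, default) inserts if absent, like json::operator[] on a non-const object.
data = {"grammar": 'root ::= "yes" | "no"'}
try:
    _ = data["json_schema"]            # checked access: raises if the key is absent
except KeyError:
    print("no json_schema given")
data.setdefault("json_schema", None)   # inserting access: the key now exists
print("json_schema" in data)           # True
```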
@@ -1383,9 +1385,10 @@ struct server_context {
if (!slot.params.stream && slot.stopped_word) {
const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
+ size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
probs = std::vector<completion_token_output>(
slot.generated_token_probs.begin(),
- slot.generated_token_probs.end() - stop_word_toks.size());
+ slot.generated_token_probs.end() - safe_offset);
} else {
probs = std::vector<completion_token_output>(
slot.generated_token_probs.begin(),
@@ -1511,7 +1514,7 @@ struct server_context {
// add subtasks
for (int i = 0; i < prompt_count; i++) {
json subtask_data = multiprompt_task.data;
- subtask_data["prompt"] = subtask_data["prompt"][i];
+ subtask_data["prompt"] = subtask_data.at("prompt")[i];
// subtasks inherit everything else (infill mode, embedding mode, etc.)
request_completion(subtask_ids[i], id_multi, subtask_data, multiprompt_task.infill, multiprompt_task.embedding);
@@ -1531,7 +1534,7 @@ struct server_context {
}
if (task.data.contains("system_prompt")) {
- system_prompt_set(task.data["system_prompt"]);
+ system_prompt_set(task.data.at("system_prompt"));
for (server_slot & slot : slots) {
slot.n_past = 0;
@@ -1643,7 +1646,7 @@ struct server_context {
} break;
case SERVER_TASK_TYPE_SLOT_SAVE:
{
- int id_slot = task.data["id_slot"];
+ int id_slot = task.data.at("id_slot");
server_slot * slot = get_slot(id_slot);
if (slot == nullptr) {
send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
@@ -1653,8 +1656,8 @@ struct server_context {
const size_t token_count = slot->cache_tokens.size();
const int64_t t_start = ggml_time_us();
- std::string filename = task.data["filename"];
- std::string filepath = task.data["filepath"];
+ std::string filename = task.data.at("filename");
+ std::string filepath = task.data.at("filepath");
const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), token_count);
@@ -1678,7 +1681,7 @@ struct server_context {
} break;
case SERVER_TASK_TYPE_SLOT_RESTORE:
{
- int id_slot = task.data["id_slot"];
+ int id_slot = task.data.at("id_slot");
server_slot * slot = get_slot(id_slot);
if (slot == nullptr) {
send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
@@ -1687,8 +1690,8 @@ struct server_context {
const int64_t t_start = ggml_time_us();
- std::string filename = task.data["filename"];
- std::string filepath = task.data["filepath"];
+ std::string filename = task.data.at("filename");
+ std::string filepath = task.data.at("filepath");
slot->cache_tokens.resize(slot->n_ctx);
size_t token_count = 0;
@@ -1720,7 +1723,7 @@ struct server_context {
} break;
case SERVER_TASK_TYPE_SLOT_ERASE:
{
- int id_slot = task.data["id_slot"];
+ int id_slot = task.data.at("id_slot");
server_slot * slot = get_slot(id_slot);
if (slot == nullptr) {
send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
@@ -2265,17 +2268,31 @@ struct server_context {
llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
result.tok = id;
- const int32_t n_probs = slot.sparams.n_probs;
- if (slot.sparams.temp <= 0 && n_probs > 0) {
- // for llama_sample_token_greedy we need to sort candidates
- llama_sample_softmax(ctx, &cur_p);
- }
+ const size_t n_probs = std::min(cur_p.size, (size_t) slot.sparams.n_probs);
+ if (n_probs > 0) {
+ const size_t n_considered = slot.ctx_sampling->n_considered;
- for (size_t i = 0; i < std::min(cur_p.size, (size_t) n_probs); ++i) {
- result.probs.push_back({
- cur_p.data[i].id,
- cur_p.data[i].p
- });
+ // Make sure at least n_probs top tokens are at the front of the vector:
+ if (slot.sparams.temp == 0.0f && n_probs > n_considered) {
+ llama_sample_top_k(ctx, &cur_p, n_probs, 0);
+ }
+
+ if (slot.sparams.temp == 0.0f) {
+ // With greedy sampling the probabilities have possibly not been calculated.
+ for (size_t i = 0; i < n_probs; ++i) {
+ result.probs.push_back({
+ cur_p.data[i].id,
+ i == 0 ? 1.0f : 0.0f
+ });
+ }
+ } else {
+ for (size_t i = 0; i < n_probs; ++i) {
+ result.probs.push_back({
+ cur_p.data[i].id,
+ i >= n_considered ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
+ });
+ }
+ }
}
if (!process_token(result, slot)) {
@@ -3121,8 +3138,8 @@ int main(int argc, char ** argv) {
server_task_result result = ctx_server.queue_results.recv(task.id);
ctx_server.queue_results.remove_waiting_task_id(task.id);
- const int n_idle_slots = result.data["idle"];
- const int n_processing_slots = result.data["processing"];
+ const int n_idle_slots = result.data.at("idle");
+ const int n_processing_slots = result.data.at("processing");
json health = {
{"status", "ok"},
@@ -3132,7 +3149,7 @@ int main(int argc, char ** argv) {
res.status = 200; // HTTP OK
if (sparams.slots_endpoint && req.has_param("include_slots")) {
- health["slots"] = result.data["slots"];
+ health["slots"] = result.data.at("slots");
}
if (n_idle_slots == 0) {
@@ -3176,7 +3193,7 @@ int main(int argc, char ** argv) {
server_task_result result = ctx_server.queue_results.recv(task.id);
ctx_server.queue_results.remove_waiting_task_id(task.id);
- res.set_content(result.data["slots"].dump(), "application/json");
+ res.set_content(result.data.at("slots").dump(), "application/json");
res.status = 200; // HTTP OK
};
@@ -3203,32 +3220,32 @@ int main(int argc, char ** argv) {
json data = result.data;
- const uint64_t n_prompt_tokens_processed = data["n_prompt_tokens_processed"];
- const uint64_t t_prompt_processing = data["t_prompt_processing"];
+ const uint64_t n_prompt_tokens_processed = data.at("n_prompt_tokens_processed");
+ const uint64_t t_prompt_processing = data.at("t_prompt_processing");
- const uint64_t n_tokens_predicted = data["n_tokens_predicted"];
- const uint64_t t_tokens_generation = data["t_tokens_generation"];
+ const uint64_t n_tokens_predicted = data.at("n_tokens_predicted");
+ const uint64_t t_tokens_generation = data.at("t_tokens_generation");
- const int32_t kv_cache_used_cells = data["kv_cache_used_cells"];
+ const int32_t kv_cache_used_cells = data.at("kv_cache_used_cells");
// metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
json all_metrics_def = json {
{"counter", {{
{"name", "prompt_tokens_total"},
{"help", "Number of prompt tokens processed."},
- {"value", (uint64_t) data["n_prompt_tokens_processed_total"]}
+ {"value", (uint64_t) data.at("n_prompt_tokens_processed_total")}
}, {
{"name", "prompt_seconds_total"},
{"help", "Prompt process time"},
- {"value", (uint64_t) data["t_prompt_processing_total"] / 1.e3}
+ {"value", (uint64_t) data.at("t_prompt_processing_total") / 1.e3}
}, {
{"name", "tokens_predicted_total"},
{"help", "Number of generation tokens processed."},
- {"value", (uint64_t) data["n_tokens_predicted_total"]}
+ {"value", (uint64_t) data.at("n_tokens_predicted_total")}
}, {
{"name", "tokens_predicted_seconds_total"},
{"help", "Predict process time"},
- {"value", (uint64_t) data["t_tokens_generation_total"] / 1.e3}
+ {"value", (uint64_t) data.at("t_tokens_generation_total") / 1.e3}
}}},
{"gauge", {{
{"name", "prompt_tokens_seconds"},
@@ -3245,15 +3262,15 @@ int main(int argc, char ** argv) {
},{
{"name", "kv_cache_tokens"},
{"help", "KV-cache tokens."},
- {"value", (uint64_t) data["kv_cache_tokens_count"]}
+ {"value", (uint64_t) data.at("kv_cache_tokens_count")}
},{
{"name", "requests_processing"},
{"help", "Number of request processing."},
- {"value", (uint64_t) data["processing"]}
+ {"value", (uint64_t) data.at("processing")}
},{
{"name", "requests_deferred"},
{"help", "Number of request deferred."},
- {"value", (uint64_t) data["deferred"]}
+ {"value", (uint64_t) data.at("deferred")}
}}}
};
@@ -3264,8 +3281,8 @@ int main(int argc, char ** argv) {
const auto & metrics_def = el.value();
for (const auto & metric_def : metrics_def) {
- const std::string name = metric_def["name"];
- const std::string help = metric_def["help"];
+ const std::string name = metric_def.at("name");
+ const std::string help = metric_def.at("help");
auto value = json_value(metric_def, "value", 0.);
prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
@@ -3274,7 +3291,7 @@ int main(int argc, char ** argv) {
}
}
- const int64_t t_start = data["t_start"];
+ const int64_t t_start = data.at("t_start");
res.set_header("Process-Start-Time-Unix", std::to_string(t_start));
res.set_content(prometheus.str(), "text/plain; version=0.0.4");
@@ -3283,7 +3300,7 @@ int main(int argc, char ** argv) {
const auto handle_slots_save = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
json request_data = json::parse(req.body);
- std::string filename = request_data["filename"];
+ std::string filename = request_data.at("filename");
if (!validate_file_name(filename)) {
res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
return;
@@ -3313,7 +3330,7 @@ int main(int argc, char ** argv) {
const auto handle_slots_restore = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
json request_data = json::parse(req.body);
- std::string filename = request_data["filename"];
+ std::string filename = request_data.at("filename");
if (!validate_file_name(filename)) {
res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
return;
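A hedged sketch of driving the save/restore handlers above over HTTP; the `/slots/<id>?action=...` route and the `filename` field follow the server README, and the base URL is illustrative:

```python
import json
from urllib.request import Request, urlopen

BASE = "http://127.0.0.1:8080"  # illustrative server address

def slot_action(id_slot: int, action: str, filename: str) -> dict:
    """POST /slots/<id>?action=save|restore with a filename payload."""
    req = Request(f"{BASE}/slots/{id_slot}?action={action}",
                  data=json.dumps({"filename": filename}).encode("utf-8"),
                  headers={"Content-Type": "application/json"})
    with urlopen(req) as resp:
        return json.load(resp)

print(slot_action(0, "save", "slot0.bin"))     # persist slot 0's KV cache
print(slot_action(0, "restore", "slot0.bin"))  # load it back
```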
@@ -3632,7 +3649,8 @@ int main(int argc, char ** argv) {
std::vector<llama_token> tokens;
if (body.count("content") != 0) {
- tokens = ctx_server.tokenize(body["content"], false);
+ const bool add_special = json_value(body, "add_special", false);
+ tokens = ctx_server.tokenize(body.at("content"), add_special);
}
const json data = format_tokenizer_response(tokens);
return res.set_content(data.dump(), "application/json; charset=utf-8");
@@ -3644,7 +3662,7 @@ int main(int argc, char ** argv) {
std::string content;
if (body.count("tokens") != 0) {
- const std::vector<llama_token> tokens = body["tokens"];
+ const std::vector<llama_token> tokens = body.at("tokens");
content = tokens_to_str(ctx_server.ctx, tokens.cbegin(), tokens.cend());
}
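The new `add_special` flag together with `/detokenize` allows a simple round-trip check, mirroring the Gherkin scenario added further below; the URL is illustrative and the exact BOS token depends on the loaded model:

```python
import json
from urllib.request import Request, urlopen

BASE = "http://127.0.0.1:8080"  # illustrative

def post(path: str, payload: dict) -> dict:
    req = Request(BASE + path, data=json.dumps(payload).encode("utf-8"),
                  headers={"Content-Type": "application/json"})
    with urlopen(req) as resp:
        return json.load(resp)

# add_special defaults to false; with true, a BOS token is prepended.
plain = post("/tokenize", {"content": "What is the capital of France ?"})
special = post("/tokenize", {"content": "What is the capital of France ?", "add_special": True})
assert len(special["tokens"]) == len(plain["tokens"]) + 1  # one leading BOS

# Dropping the BOS, detokenization should round-trip the text.
text = post("/detokenize", {"tokens": special["tokens"][1:]})
print(text["content"].strip())
```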
@@ -3667,10 +3685,10 @@ int main(int argc, char ** argv) {
json prompt;
if (body.count("input") != 0) {
is_openai = true;
- prompt = body["input"];
+ prompt = body.at("input");
} else if (body.count("content") != 0) {
// with "content", we only support single prompt
- prompt = std::vector<json>{body["content"]};
+ prompt = std::vector<json>{body.at("content")};
} else {
res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
return;
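The handler above accepts either the OpenAI-style `input` field or the native `content` field; a hedged illustration of both request shapes (URL illustrative, response handling kept minimal):

```python
import json
from urllib.request import Request, urlopen

BASE = "http://127.0.0.1:8080"  # illustrative

def post(path: str, payload: dict) -> dict:
    req = Request(BASE + path, data=json.dumps(payload).encode("utf-8"),
                  headers={"Content-Type": "application/json"})
    with urlopen(req) as resp:
        return json.load(resp)

# OpenAI-compatible: "input" may be a string or a list of strings.
oai = post("/v1/embeddings", {"input": ["first text", "second text"]})
# Native endpoint: a single "content" prompt only.
native = post("/embedding", {"content": "first text"})
```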
@@ -3689,7 +3707,7 @@ int main(int argc, char ** argv) {
if (!result.error) {
if (result.data.count("results")) {
// result for multi-task
- responses = result.data["results"];
+ responses = result.data.at("results");
} else {
// result for single task
responses = std::vector<json>{result.data};
diff --git a/examples/server/tests/features/results.feature b/examples/server/tests/features/results.feature
index f17120f7b..aa0b8d0c6 100644
--- a/examples/server/tests/features/results.feature
+++ b/examples/server/tests/features/results.feature
@@ -7,44 +7,16 @@ Feature: Results
And a model file tinyllamas/split/stories15M-00001-of-00003.gguf from HF repo ggml-org/models
And a model file test-model-00001-of-00003.gguf
And 128 as batch size
- And 256 KV cache size
+ And 1024 KV cache size
And 128 max tokens to predict
-
- Scenario Outline: Multi users completion
- Given <n_slots> slots
And continuous batching
+
+ Scenario Outline: consistent results with same seed
+ Given <n_slots> slots
Then the server is starting
Then the server is healthy
- Given 42 as seed
- And a prompt:
- """
- Write a very long story about AI.
- """
-
- Given 42 as seed
- And a prompt:
- """
- Write a very long story about AI.
- """
-
- Given 42 as seed
- And a prompt:
- """
- Write a very long story about AI.
- """
-
- Given 42 as seed
- And a prompt:
- """
- Write a very long story about AI.
- """
-
- Given 42 as seed
- And a prompt:
- """
- Write a very long story about AI.
- """
+ Given 4 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42
Given concurrent completion requests
Then the server is busy
@@ -55,3 +27,55 @@ Feature: Results
| n_slots |
| 1 |
| 2 |
+
+ Scenario Outline: different results with different seed
+ Given <n_slots> slots
+ Then the server is starting
+ Then the server is healthy
+
+ Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42
+ Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 43
+ Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 44
+ Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 45
+
+ Given concurrent completion requests
+ Then the server is busy
+ Then the server is idle
+ And all slots are idle
+ Then all predictions are different
+ Examples:
+ | n_slots |
+ | 1 |
+ | 2 |
+
+ Scenario Outline: consistent results with same seed and varying batch size
+ Given 4 slots
+ And <temp> temperature
+ # And 0 as draft
+ Then the server is starting
+ Then the server is healthy
+
+ Given 1 prompts "Write a very long story about AI." with seed 42
+ And concurrent completion requests
+ # Then the server is busy # Not all slots will be utilized.
+ Then the server is idle
+ And all slots are idle
+
+ Given <n_parallel> prompts "Write a very long story about AI." with seed 42
+ And concurrent completion requests
+ # Then the server is busy # Not all slots will be utilized.
+ Then the server is idle
+ And all slots are idle
+
+ Then all predictions are equal
+ Examples:
+ | n_parallel | temp |
+ | 1 | 0.0 |
+ | 2 | 0.0 |
+ | 4 | 0.0 |
+ | 1 | 1.0 |
+ # FIXME: These tests fail on master. The problem seems to be the unified KV cache.
+ # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
+ # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574 .
+ # | 2 | 1.0 |
+ # | 4 | 1.0 |
diff --git a/examples/server/tests/features/server.feature b/examples/server/tests/features/server.feature
index 646a4e49d..d21c09135 100644
--- a/examples/server/tests/features/server.feature
+++ b/examples/server/tests/features/server.feature
@@ -7,6 +7,7 @@ Feature: llama.cpp server
And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
And a model file test-model.gguf
And a model alias tinyllama-2
+ And BOS token is 1
And 42 as server seed
# KV Cache corresponds to the total amount of tokens
# that can be stored across all independent sequences: #4130
@@ -91,7 +92,18 @@ Feature: llama.cpp server
"""
What is the capital of France ?
"""
- Then tokens can be detokenize
+ Then tokens can be detokenized
+ And tokens do not begin with BOS
+
+ Scenario: Tokenize w/ BOS
+ Given adding special tokens
+ When tokenizing:
+ """
+ What is the capital of Germany?
+ """
+ Then tokens begin with BOS
+ Given first token is removed
+ Then tokens can be detokenized
Scenario: Models available
Given available models
diff --git a/examples/server/tests/features/steps/steps.py b/examples/server/tests/features/steps/steps.py
index f71e0d706..f4b1ac1d7 100644
--- a/examples/server/tests/features/steps/steps.py
+++ b/examples/server/tests/features/steps/steps.py
@@ -65,6 +65,7 @@ def step_server_config(context, server_fqdn, server_port):
context.server_seed = None
context.user_api_key = None
context.response_format = None
+ context.temperature = None
context.tasks_result = []
context.concurrent_tasks = []
@@ -232,15 +233,17 @@ async def step_all_slots_status(context, expected_slot_status_string):
@async_run_until_complete
async def step_request_completion(context, api_error):
expect_api_error = api_error == 'raised'
+ seeds = await completions_seed(context, num_seeds=1)
completion = await request_completion(context.prompts.pop(),
+ seeds[0] if seeds is not None else seeds,
context.base_url,
debug=context.debug,
n_predict=context.n_predict,
cache_prompt=context.cache_prompt,
id_slot=context.id_slot,
- seed=await completions_seed(context),
expect_api_error=expect_api_error,
- user_api_key=context.user_api_key)
+ user_api_key=context.user_api_key,
+ temperature=context.temperature)
context.tasks_result.append(completion)
if context.debug:
print(f"Completion response: {completion}")
@@ -269,6 +272,15 @@ async def step_predictions_equal(context):
context.tasks_result = []
+@step('all predictions are different')
+@async_run_until_complete
+async def step_predictions_different(context):
+ n_completions = await gather_tasks_results(context)
+ assert n_completions >= 2, "need at least 2 completions"
+ assert_all_predictions_different(context.tasks_result)
+ context.tasks_result = []
+
+
@step('the completion is truncated')
def step_assert_completion_truncated(context):
step_assert_completion_truncated(context, '')
@@ -311,6 +323,11 @@ def step_response_format(context, response_format):
context.response_format = json.loads(response_format)
+@step('{temperature:f} temperature')
+def step_temperature(context, temperature):
+ context.temperature = temperature
+
+
@step('streaming is {enable_streaming}')
def step_streaming(context, enable_streaming):
context.enable_streaming = enable_streaming == 'enabled'
@@ -353,7 +370,15 @@ def step_n_ubatch(context, n_ubatch):
@step('{seed:d} as seed')
def step_seed(context, seed):
- context.seed = seed
+ if context.seed is None:
+ context.seed = [seed]
+ else:
+ context.seed.append(seed)
+
+
+@step('BOS token is {bos:d}')
+def step_bos_token(context, bos):
+ context.bos = bos
@step('a prefix prompt')
@@ -413,7 +438,9 @@ async def step_oai_chat_completions(context, api_error):
if context.debug:
print(f"Submitting OAI compatible completions request...")
expect_api_error = api_error == 'raised'
+ seeds = await completions_seed(context, num_seeds=1)
completion = await oai_chat_completions(context.prompts.pop(),
+ seeds[0] if seeds is not None else seeds,
context.system_prompt,
context.base_url,
'/v1/chat',
@@ -429,8 +456,6 @@ async def step_oai_chat_completions(context, api_error):
response_format=context.response_format
if hasattr(context, 'response_format') else None,
- seed=await completions_seed(context),
-
user_api_key=context.user_api_key
if hasattr(context, 'user_api_key') else None,
@@ -457,20 +482,31 @@ def step_a_prompt_prompt(context, prompt):
context.n_prompts = len(context.prompts)
+@step('{num_prompts:d} prompts {prompt} with seed {seed:d}')
+def step_many_prompts(context, num_prompts, prompt, seed):
+ if context.seed is None:
+ context.seed = []
+ for _ in range(num_prompts):
+ context.seed.append(seed)
+ context.prompts.append(prompt)
+ context.n_prompts = len(context.prompts)
+
+
@step('concurrent completion requests')
@async_run_until_complete()
async def step_concurrent_completion_requests(context):
- await concurrent_requests(context,
- request_completion,
- # prompt is inserted automatically
- context.base_url,
- debug=context.debug,
- prompt_prefix=context.prompt_prefix,
- prompt_suffix=context.prompt_suffix,
- n_predict=context.n_predict if hasattr(context, 'n_predict') else None,
- seed=await completions_seed(context),
- user_api_key=context.user_api_key if hasattr(context,
- 'user_api_key') else None)
+ await concurrent_requests(
+ context,
+ request_completion,
+ # prompt is inserted automatically
+ context.base_url,
+ debug=context.debug,
+ prompt_prefix=context.prompt_prefix,
+ prompt_suffix=context.prompt_suffix,
+ n_predict=context.n_predict if hasattr(context, 'n_predict') else None,
+ user_api_key=context.user_api_key if hasattr(context, 'user_api_key') else None,
+ temperature=context.temperature,
+ )
@step('concurrent OAI completions requests')
@@ -490,7 +526,6 @@ async def step_oai_chat_completions(context):
if hasattr(context, 'enable_streaming') else None,
response_format=context.response_format
if hasattr(context, 'response_format') else None,
- seed=await completions_seed(context),
user_api_key=context.user_api_key
if hasattr(context, 'user_api_key') else None)
@@ -512,10 +547,6 @@ async def step_oai_chat_completions(context):
if hasattr(context, 'enable_streaming') else None,
response_format=context.response_format
if hasattr(context, 'response_format') else None,
- seed=context.seed
- if hasattr(context, 'seed') else
- context.server_seed
- if hasattr(context, 'server_seed') else None,
user_api_key=context.user_api_key
if hasattr(context, 'user_api_key') else None)
@@ -544,7 +575,7 @@ async def all_prompts_are_predicted(context, expected_predicted_n=None):
@async_run_until_complete
async def step_compute_embedding(context):
context.n_prompts = 1
- context.embeddings = await request_embedding(context_text(context), base_url=context.base_url)
+ context.embeddings = await request_embedding(context_text(context), None, base_url=context.base_url)
@step('all embeddings are the same')
@@ -585,7 +616,7 @@ def step_assert_embeddings(context):
@async_run_until_complete
async def step_oai_compute_embeddings(context):
context.n_prompts = 1
- context.embeddings = await request_oai_embeddings(context_text(context),
+ context.embeddings = await request_oai_embeddings(context_text(context), None,
base_url=context.base_url,
user_api_key=context.user_api_key,
model=context.model)
@@ -594,7 +625,7 @@ async def step_oai_compute_embeddings(context):
@step('an OAI compatible embeddings computation request for multiple inputs')
@async_run_until_complete
async def step_oai_compute_embeddings_multiple_inputs(context):
- context.embeddings = await request_oai_embeddings(context.prompts,
+ context.embeddings = await request_oai_embeddings(context.prompts, None,
base_url=context.base_url,
user_api_key=context.user_api_key,
model=context.model)
@@ -630,21 +661,29 @@ async def all_embeddings_are_generated(context):
assert_embeddings(context.tasks_result.pop().pop())
+@step('adding special tokens')
+def step_tokenize_set_add_special(context):
+ context.tokenize_add_special = True
+
+
@step('tokenizing')
@async_run_until_complete
async def step_tokenize(context):
context.tokenized_text = context_text(context)
async with aiohttp.ClientSession() as session:
+ tokenize_args = {
+ "content": context.tokenized_text,
+ }
+ if getattr(context, 'tokenize_add_special', None) is not None:
+ tokenize_args['add_special'] = context.tokenize_add_special
async with session.post(f'{context.base_url}/tokenize',
- json={
- "content": context.tokenized_text,
- }) as response:
+ json=tokenize_args) as response:
assert response.status == 200
tokenize_json = await response.json()
context.tokens = tokenize_json['tokens']
-@step('tokens can be detokenize')
+@step('tokens can be detokenized')
@async_run_until_complete
async def step_detokenize(context):
assert len(context.tokens) > 0
@@ -659,6 +698,21 @@ async def step_detokenize(context):
assert context.tokenized_text == detokenize_json['content'].strip()
+@step('tokens begin with BOS')
+def step_tokens_begin_with_bos(context):
+ assert context.tokens[0] == context.bos
+
+
+@step('tokens do not begin with BOS')
+def step_tokens_do_not_begin_with_bos(context):
+ assert context.tokens[0] != context.bos
+
+
+@step('first token is removed')
+def step_remove_first_token(context):
+ context.tokens = context.tokens[1:]
+
+
@step('an OPTIONS request is sent from {origin}')
@async_run_until_complete
async def step_options_request(context, origin):
@@ -740,8 +794,9 @@ async def concurrent_requests(context, f_completion, *args, **kwargs):
if context.debug:
print(f"starting {context.n_prompts} concurrent completion requests...")
assert context.n_prompts > 0
+ seeds = await completions_seed(context)
for prompt_no in range(context.n_prompts):
- shifted_args = [context.prompts.pop(), *args]
+ shifted_args = [context.prompts.pop(), seeds[prompt_no], *args]
context.concurrent_tasks.append(asyncio.create_task(f_completion(*shifted_args, **kwargs)))
await asyncio.sleep(0.1)
@@ -781,6 +836,7 @@ def step_server_responds_with_status_code(context, status_code):
async def request_completion(prompt,
+ seed,
base_url,
debug=False,
prompt_prefix=None,
@@ -788,9 +844,9 @@ async def request_completion(prompt,
n_predict=None,
cache_prompt=False,
id_slot=None,
- seed=None,
expect_api_error=None,
- user_api_key=None):
+ user_api_key=None,
+ temperature=None):
if debug:
print(f"Sending completion request: {prompt}")
origin = "my.super.domain"
@@ -811,7 +867,8 @@ async def request_completion(prompt,
"n_predict": n_predict if n_predict is not None else -1,
"cache_prompt": cache_prompt,
"id_slot": id_slot,
- "seed": seed if seed is not None else 42
+ "seed": seed if seed is not None else 42,
+ "temperature": temperature if temperature is not None else "0.8f",
},
headers=headers,
timeout=3600) as response:
@@ -824,6 +881,7 @@ async def request_completion(prompt,
async def oai_chat_completions(user_prompt,
+ seed,
system_prompt,
base_url,
base_path,
@@ -833,7 +891,6 @@ async def oai_chat_completions(user_prompt,
n_predict=None,
enable_streaming=None,
response_format=None,
- seed=None,
user_api_key=None,
expect_api_error=None):
if debug:
@@ -882,7 +939,7 @@ async def oai_chat_completions(user_prompt,
while event_received:
event_received = False
async for line_in_bytes in response.content:
- line = line_in_bytes.decode('utf8')
+ line = line_in_bytes.decode('utf-8')
line = line.rstrip('\n').rstrip('\r')
if line == '':
continue
@@ -952,7 +1009,7 @@ async def oai_chat_completions(user_prompt,
return completion_response
-async def request_embedding(content, base_url=None):
+async def request_embedding(content, seed, base_url=None):
async with aiohttp.ClientSession() as session:
async with session.post(f'{base_url}/embedding',
json={
@@ -963,7 +1020,7 @@ async def request_embedding(content, base_url=None):
return [response_json['embedding']]
-async def request_oai_embeddings(input,
+async def request_oai_embeddings(input, seed,
base_url=None, user_api_key=None,
model=None, async_client=False):
# openai client always expects an api_key
@@ -1036,21 +1093,31 @@ def assert_n_tokens_predicted(completion_response, expected_predicted_n=None, re
f' {n_predicted} <> {expected_predicted_n}')
def assert_all_predictions_equal(completion_responses):
- content_0 = completion_responses[0]['content']
-
if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
- print(f"content 0: {content_0}")
+ for i, response_i in enumerate(completion_responses):
+ content_i = response_i['content']
+ print(f"content {i}: {content_i}")
+ for i, response_i in enumerate(completion_responses):
+ content_i = response_i['content']
+ for j, response_j in enumerate(completion_responses):
+ if i == j:
+ continue
+ content_j = response_j['content']
+ assert content_i == content_j, "contents not equal"
- i = 1
- for response in completion_responses[1:]:
- content = response['content']
- if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
- print(f"content {i}: {content}")
-
- assert content == content_0, "contents not equal"
-
- i += 1
+def assert_all_predictions_different(completion_responses):
+ if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
+ for i, response_i in enumerate(completion_responses):
+ content_i = response_i['content']
+ print(f"content {i}: {content_i}")
+ for i, response_i in enumerate(completion_responses):
+ content_i = response_i['content']
+ for j, response_j in enumerate(completion_responses):
+ if i == j:
+ continue
+ content_j = response_j['content']
+ assert content_i != content_j, "contents not different"
async def gather_tasks_results(context):
@@ -1145,9 +1212,22 @@ def assert_slots_status(slots, expected_slots):
f" = {expected[key]} != {slot[key]}")
-async def completions_seed(context):
- return context.seed if hasattr(context, 'seed') and context.seed is not None \
- else context.server_seed if hasattr(context, 'server_seed') else None
+async def completions_seed(context, num_seeds=None):
+ if hasattr(context, "seed") and context.seed is not None:
+ assert len(context.seed) == context.n_prompts
+ if num_seeds is None:
+ num_seeds = context.n_prompts
+ assert num_seeds <= context.n_prompts
+ seeds = context.seed[:num_seeds]
+ context.seed = context.seed[num_seeds:] if num_seeds < context.n_prompts else None
+ return seeds
+
+ if hasattr(context, "server_seed") and context.server_seed is not None:
+ if num_seeds is None:
+ return [context.server_seed] * context.n_prompts
+ else:
+ return [context.server_seed] * num_seeds
+ return None
def context_text(context):
diff --git a/examples/server/themes/README.md b/examples/server/themes/README.md
new file mode 100644
index 000000000..62e721a27
--- /dev/null
+++ b/examples/server/themes/README.md
@@ -0,0 +1,5 @@
+# LLaMA.cpp Server Themes
+
+A directory of sample "public" theme directories. To try one, point the server at it with `--path`, e.g. `server --path=themes/wild`.
+
+
diff --git a/examples/server/themes/buttons-top/README.md b/examples/server/themes/buttons-top/README.md
new file mode 100644
index 000000000..808c4cf81
--- /dev/null
+++ b/examples/server/themes/buttons-top/README.md
@@ -0,0 +1,7 @@
+# LLaMA.cpp Server Buttons Top Theme
+
+Simple tweaks to the UI: the chat buttons sit at the top of the page instead of the bottom, so you can hit Stop without chasing it down the page.
+
+To use it, run the server with `--path=themes/buttons-top`
+
+
diff --git a/examples/server/themes/buttons-top/buttons_top.png b/examples/server/themes/buttons-top/buttons_top.png
new file mode 100644
index 000000000..c54454519
Binary files /dev/null and b/examples/server/themes/buttons-top/buttons_top.png differ
diff --git a/examples/server/themes/buttons-top/favicon.ico b/examples/server/themes/buttons-top/favicon.ico
new file mode 100644
index 000000000..89e154a0a
Binary files /dev/null and b/examples/server/themes/buttons-top/favicon.ico differ
diff --git a/examples/server/themes/buttons-top/index.html b/examples/server/themes/buttons-top/index.html
new file mode 100644
index 000000000..6af30d307
--- /dev/null
+++ b/examples/server/themes/buttons-top/index.html
@@ -0,0 +1,1057 @@
+ llama.cpp - chat
+ [1057 lines of theme HTML/CSS/JS; markup not reproduced here, only the page title survives]
diff --git a/examples/server/themes/wild/README.md b/examples/server/themes/wild/README.md
new file mode 100644
index 000000000..560bcc81b
--- /dev/null
+++ b/examples/server/themes/wild/README.md
@@ -0,0 +1,5 @@
+# LLaMA.cpp Server Wild Theme
+
+Simple tweaks to the UI. To use it, run the server with `--path=themes/wild`
+
+
diff --git a/examples/server/themes/wild/favicon.ico b/examples/server/themes/wild/favicon.ico
new file mode 100644
index 000000000..89e154a0a
Binary files /dev/null and b/examples/server/themes/wild/favicon.ico differ
diff --git a/examples/server/themes/wild/index.html b/examples/server/themes/wild/index.html
new file mode 100644
index 000000000..772e716cd
--- /dev/null
+++ b/examples/server/themes/wild/index.html
@@ -0,0 +1,1061 @@
+ llama.cpp - chat
+ [1061 lines of theme HTML/CSS/JS; markup not reproduced here, only the page title survives]
diff --git a/examples/server/themes/wild/llama_cpp.png b/examples/server/themes/wild/llama_cpp.png
new file mode 100644
index 000000000..bad1dc9fc
Binary files /dev/null and b/examples/server/themes/wild/llama_cpp.png differ
diff --git a/examples/server/themes/wild/llamapattern.png b/examples/server/themes/wild/llamapattern.png
new file mode 100644
index 000000000..2a159ce6a
Binary files /dev/null and b/examples/server/themes/wild/llamapattern.png differ
diff --git a/examples/server/themes/wild/wild.png b/examples/server/themes/wild/wild.png
new file mode 100644
index 000000000..46ffa0f3e
Binary files /dev/null and b/examples/server/themes/wild/wild.png differ
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index 1a2212502..d872b63f5 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -3,6 +3,8 @@
#include "llama.h"
#include "common.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
#include <string>
@@ -49,18 +51,18 @@ extern bool server_log_json;
#define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO( MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)
-static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra);
+static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra);
template <typename T>
-static T json_value(const json &body, const std::string &key, const T &default_value) {
+static T json_value(const json & body, const std::string & key, const T & default_value) {
// Fallback null to default value
- if (body.contains(key) && !body.at(key).is_null()){
+ if (body.contains(key) && !body.at(key).is_null()) {
try {
- return body.value(key, default_value);
- }
- catch (nlohmann::json_abi_v3_11_3::detail::type_error const&){
- std::string message = "Wrong type supplied for parameter '" + key + "'. Expected '" + typeid(default_value).name() + "', using default value.";
- server_log("WARN", __func__, __LINE__, message.c_str(), body);
+ return body.at(key);
+ } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
+ std::stringstream ss;
+ ss << "Wrong type supplied for parameter '" << key << "'. Expected '" << json(default_value).type_name() << "', using default value.";
+ LOG_WARNING(ss.str().c_str(), body);
return default_value;
}
} else {
@@ -68,16 +70,16 @@ static T json_value(const json &body, const std::string &key, const T &default_v
}
}
-static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra) {
+static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra) {
std::stringstream ss_tid;
ss_tid << std::this_thread::get_id();
- json log = nlohmann::ordered_json{
+ json log = json{
{"tid", ss_tid.str()},
{"timestamp", time(nullptr)},
};
if (server_log_json) {
- log.merge_patch( {
+ log.merge_patch({
{"level", level},
{"function", function},
{"line", line},
@@ -98,7 +100,7 @@ static inline void server_log(const char *level, const char *function, int line,
}
std::stringstream ss;
ss << buf << " |";
- for (const auto& el : log.items())
+ for (const auto & el : log.items())
{
const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace);
ss << " " << el.key() << "=" << value;
@@ -373,11 +375,11 @@ static json oaicompat_completion_params_parse(
llama_params["top_p"] = json_value(body, "top_p", 1.0);
// Apply chat template to the list of messages
- llama_params["prompt"] = format_chat(model, chat_template, body["messages"]);
+ llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));
// Handle "stop" field
- if (body.contains("stop") && body["stop"].is_string()) {
- llama_params["stop"] = json::array({body["stop"].get()});
+ if (body.contains("stop") && body.at("stop").is_string()) {
+ llama_params["stop"] = json::array({body.at("stop").get()});
} else {
llama_params["stop"] = json_value(body, "stop", json::array());
}
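
The reworked `json_value` above captures a useful pattern: return the typed value when the key is present and well-typed, log a warning and fall back to the default otherwise. A self-contained sketch of the same pattern, assuming only nlohmann::json; the helper name and sample keys are illustrative, not part of this patch:

```cpp
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

template <typename T>
static T json_value_sketch(const json & body, const std::string & key, const T & default_value) {
    if (!body.contains(key) || body.at(key).is_null()) {
        return default_value; // missing or null -> fallback
    }
    try {
        return body.at(key).get<T>(); // throws json::type_error on mismatch
    } catch (const json::type_error &) {
        std::cerr << "wrong type for '" << key << "', using default\n";
        return default_value;
    }
}

int main() {
    const json body = {{"n_predict", 128}, {"seed", "not-a-number"}};
    std::cout << json_value_sketch(body, "n_predict", -1) << "\n"; // 128
    std::cout << json_value_sketch(body, "seed", 42)      << "\n"; // 42: wrong type
    std::cout << json_value_sketch(body, "top_k", 40)     << "\n"; // 40: missing key
}
```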
diff --git a/examples/sycl/README.md b/examples/sycl/README.md
index b46f17f39..c589c2d3a 100644
--- a/examples/sycl/README.md
+++ b/examples/sycl/README.md
@@ -1,6 +1,6 @@
# llama.cpp/example/sycl
-This example program provide the tools for llama.cpp for SYCL on Intel GPU.
+This example program provides the tools for running llama.cpp with SYCL on Intel GPUs.
## Tool
diff --git a/flake.lock b/flake.lock
index b738da7c6..c9ead0bf7 100644
--- a/flake.lock
+++ b/flake.lock
@@ -5,11 +5,11 @@
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
- "lastModified": 1712014858,
- "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
+ "lastModified": 1714641030,
+ "narHash": "sha256-yzcRNDoyVP7+SCNX0wmuDju1NUCt8Dz9+lyUXEI0dbI=",
"owner": "hercules-ci",
"repo": "flake-parts",
- "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
+ "rev": "e5d10a24b66c3ea8f150e47dfdb0416ab7c3390e",
"type": "github"
},
"original": {
@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
- "lastModified": 1714076141,
- "narHash": "sha256-Drmja/f5MRHZCskS6mvzFqxEaZMeciScCTFxWVLqWEY=",
+ "lastModified": 1714635257,
+ "narHash": "sha256-4cPymbty65RvF1DWQfc+Bc8B233A1BWxJnNULJKQ1EY=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "7bb2ccd8cdc44c91edba16c48d2c8f331fb3d856",
+ "rev": "63c3a29ca82437c87573e4c6919b09a24ea61b0f",
"type": "github"
},
"original": {
@@ -36,20 +36,14 @@
},
"nixpkgs-lib": {
"locked": {
- "dir": "lib",
- "lastModified": 1711703276,
- "narHash": "sha256-iMUFArF0WCatKK6RzfUJknjem0H9m4KgorO/p3Dopkk=",
- "owner": "NixOS",
- "repo": "nixpkgs",
- "rev": "d8fe5e6c92d0d190646fb9f1056741a229980089",
- "type": "github"
+ "lastModified": 1714640452,
+ "narHash": "sha256-QBx10+k6JWz6u7VsohfSw8g8hjdBZEf8CFzXH1/1Z94=",
+ "type": "tarball",
+ "url": "https://github.com/NixOS/nixpkgs/archive/50eb7ecf4cd0a5756d7275c8ba36790e5bd53e33.tar.gz"
},
"original": {
- "dir": "lib",
- "owner": "NixOS",
- "ref": "nixos-unstable",
- "repo": "nixpkgs",
- "type": "github"
+ "type": "tarball",
+ "url": "https://github.com/NixOS/nixpkgs/archive/50eb7ecf4cd0a5756d7275c8ba36790e5bd53e33.tar.gz"
}
},
"root": {
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index c30554f0c..6f89a7cc3 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -113,7 +113,7 @@ static ggml_cuda_device_info ggml_cuda_init() {
for (int id = 0; id < info.device_count; ++id) {
int device_vmm = 0;
-#if !defined(GGML_USE_HIPBLAS)
+#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM)
CUdevice device;
CU_CHECK(cuDeviceGet(&device, id));
CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device));
@@ -259,7 +259,7 @@ struct ggml_cuda_pool_leg : public ggml_cuda_pool {
};
// pool with virtual memory
-#if !defined(GGML_USE_HIPBLAS)
+#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM)
struct ggml_cuda_pool_vmm : public ggml_cuda_pool {
static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB
@@ -356,7 +356,7 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool {
#endif // !defined(GGML_USE_HIPBLAS)
std::unique_ptr<ggml_cuda_pool> ggml_backend_cuda_context::new_pool_for_device(int device) {
-#if !defined(GGML_USE_HIPBLAS)
+#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM)
if (ggml_cuda_info().devices[device].vmm) {
return std::unique_ptr<ggml_cuda_pool>(new ggml_cuda_pool_vmm(device));
}
@@ -1647,7 +1647,7 @@ static void ggml_cuda_op_mul_mat(
}
}
-static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
+static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
GGML_ASSERT(ggml_backend_buffer_is_cuda(src0->buffer));
GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
@@ -1670,7 +1670,7 @@ static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const gg
ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream);
}
-static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
+static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(!ggml_is_transposed(src0));
GGML_ASSERT(!ggml_is_transposed(src1));
GGML_ASSERT(!ggml_is_permuted(src0));
@@ -2410,32 +2410,304 @@ GGML_CALL static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {
GGML_UNUSED(backend);
}
+static void set_ggml_graph_node_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) {
+ graph_node_properties->node_address = node->data;
+ graph_node_properties->node_op = node->op;
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
+ graph_node_properties->ne[i] = node->ne[i];
+ graph_node_properties->nb[i] = node->nb[i];
+ }
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ graph_node_properties->src_address[i] = node->src[i] ? node->src[i]->data : nullptr;
+ }
+}
+
+static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) {
+ if (node->data != graph_node_properties->node_address &&
+ node->op != GGML_OP_CPY &&
+ node->op != GGML_OP_VIEW) {
+ return false;
+ }
+
+ if (node->op != graph_node_properties->node_op) {
+ return false;
+ }
+
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
+ if (node->ne[i] != graph_node_properties->ne[i]) {
+ return false;
+ }
+ if (node->nb[i] != graph_node_properties->nb[i]) {
+ return false;
+ }
+ }
+
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (node->src[i] &&
+ node->src[i]->data != graph_node_properties->src_address[i] &&
+ node->op != GGML_OP_CPY &&
+ node->op != GGML_OP_VIEW
+ ) {
+ return false;
+ }
+ }
+ return true;
+}
+
GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
ggml_cuda_set_device(cuda_ctx->device);
- for (int i = 0; i < cgraph->n_nodes; i++) {
- ggml_tensor * node = cgraph->nodes[i];
+#ifdef USE_CUDA_GRAPH
+ static const bool disable_cuda_graphs_due_to_env = (getenv("GGML_CUDA_DISABLE_GRAPHS") != nullptr);
- if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
- continue;
+ // Objects required for CUDA Graph
+ if (cuda_ctx->cuda_graph == nullptr) {
+ cuda_ctx->cuda_graph.reset(new ggml_cuda_graph());
+ }
+
+ bool use_cuda_graph = true;
+ bool cuda_graph_update_required = false;
+ // pointer to CUDA cpy kernel, which is required to identify
+ // kernel parameters which need to be updated in the graph for each token
+ void * ggml_cuda_cpy_fn_ptr = nullptr;
+
+ if (cuda_ctx->cuda_graph->graph == nullptr) {
+ if (ggml_cuda_info().devices[cuda_ctx->device].cc < CC_AMPERE) {
+ cuda_ctx->cuda_graph->disable_due_to_gpu_arch = true;
+#ifndef NDEBUG
+ fprintf(stderr, "%s: disabling CUDA graphs due to GPU architecture\n", __func__);
+#endif
+ }
+ }
+
+ // Disable CUDA graphs in presence of env var, old GPU, use-case which is changing too rapidly,
+ // or previous graph capture failure.
+ // Also disable for multi-gpu for now. TODO: investigate
+ if (disable_cuda_graphs_due_to_env
+ || cuda_ctx->cuda_graph->disable_due_to_gpu_arch
+ || cuda_ctx->cuda_graph->disable_due_to_too_many_updates
+ || cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture) {
+ use_cuda_graph = false;
+ }
+
+ if (use_cuda_graph) {
+ if (cuda_ctx->cuda_graph->instance == nullptr) {
+ cuda_graph_update_required = true;
}
+ // Check if the graph size has changed
+ if (cuda_ctx->cuda_graph->ggml_graph_properties.size() != (size_t)cgraph->n_nodes) {
+ cuda_graph_update_required = true;
+ cuda_ctx->cuda_graph->ggml_graph_properties.resize(cgraph->n_nodes);
+ }
+
+ // Loop over nodes in GGML graph to determine if CUDA graph update is required
+ // and store properties to allow this comparison for the next token
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ bool has_matching_properties = true;
+ if (!cuda_graph_update_required) {
+ has_matching_properties = ggml_graph_node_has_matching_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]);
+ }
+ if (!has_matching_properties) {
+ cuda_graph_update_required = true;
+ }
+ set_ggml_graph_node_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]);
+ }
+
+ // Loop over nodes in GGML graph to obtain info needed for CUDA graph
+ cuda_ctx->cuda_graph->updated_kernel_arg.clear();
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ ggml_tensor * node = cgraph->nodes[i];
+
+ if (node->src[0] && ggml_backend_buffer_is_cuda_split(node->src[0]->buffer)) {
+ use_cuda_graph = false; // Split buffers are not supported by CUDA graph capture
#ifndef NDEBUG
- assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device));
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- if (node->src[j] != nullptr) {
- assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer));
+ fprintf(stderr, "%s: disabling CUDA graphs due to split buffer\n", __func__);
+#endif
+ }
+
+ if (node->op == GGML_OP_MUL_MAT_ID) {
+ use_cuda_graph = false; // This node type is not supported by CUDA graph capture
+#ifndef NDEBUG
+ fprintf(stderr, "%s: disabling CUDA graphs due to mul_mat_id\n", __func__);
+#endif
+ }
+
+ if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1) {
+ // disable CUDA graphs for batch size > 1 for now.
+ // Changes in batch size or context size can cause changes to the grid size of some kernels.
+ use_cuda_graph = false;
+#ifndef NDEBUG
+ fprintf(stderr, "%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]);
+#endif
+ }
+
+ if (node->op == GGML_OP_CPY) {
+ // store the copy op parameter which changes with each token.
+ cuda_ctx->cuda_graph->updated_kernel_arg.push_back((char **) &(node->src[1]->data));
+ if (ggml_cuda_cpy_fn_ptr == nullptr) {
+ // store a pointer to the copy op CUDA kernel to identify it later
+ ggml_cuda_cpy_fn_ptr = ggml_cuda_cpy_fn(node->src[0], node->src[1]);
+ }
+ }
+
+ if (!use_cuda_graph) {
+ break;
}
}
+
+ // Disable CUDA graphs (from the next token) if the use-case is demanding too many consecutive graph updates.
+ if (cuda_graph_update_required) {
+ cuda_ctx->cuda_graph->number_consecutive_updates++;
+ } else {
+ cuda_ctx->cuda_graph->number_consecutive_updates = 0;
+ }
+
+ if (cuda_ctx->cuda_graph->number_consecutive_updates >= 4) {
+ cuda_ctx->cuda_graph->disable_due_to_too_many_updates = true;
+#ifndef NDEBUG
+ fprintf(stderr, "%s: disabling CUDA graphs due to too many consecutive updates\n", __func__);
+#endif
+ }
+ }
+
+ if (use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture
+ CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed));
+ }
+
+#else
+ bool use_cuda_graph = false;
+ bool cuda_graph_update_required = false;
+#endif // USE_CUDA_GRAPH
+
+ bool graph_evaluated_or_captured = false;
+
+ while (!graph_evaluated_or_captured) {
+ // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph.
+ // With the use of CUDA graphs, the execution will be performed by the graph launch.
+ if (!use_cuda_graph || cuda_graph_update_required) {
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ ggml_tensor * node = cgraph->nodes[i];
+
+ if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
+ continue;
+ }
+
+#ifndef NDEBUG
+ assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device));
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ if (node->src[j] != nullptr) {
+ assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer));
+ }
+ }
#endif
- bool ok = ggml_cuda_compute_forward(*cuda_ctx, node);
- if (!ok) {
- fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
+ bool ok = ggml_cuda_compute_forward(*cuda_ctx, node);
+ if (!ok) {
+ fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
+ }
+ GGML_ASSERT(ok);
+ }
}
- GGML_ASSERT(ok);
+
+#ifdef USE_CUDA_GRAPH
+ if (use_cuda_graph && cuda_graph_update_required) { // End CUDA graph capture
+ if (cuda_ctx->cuda_graph->graph != nullptr) {
+ CUDA_CHECK(cudaGraphDestroy(cuda_ctx->cuda_graph->graph));
+ cuda_ctx->cuda_graph->graph = nullptr;
+ }
+ CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph));
+
+#if 0
+ if (disable_cuda_graphs_due_to_failed_capture) {
+ use_cuda_graph = false;
+ cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture = true;
+#ifndef NDEBUG
+ fprintf(stderr, "%s: disabling CUDA graphs due to failed graph capture\n", __func__);
+#endif
+ } else {
+ graph_evaluated_or_captured = true; // CUDA graph has been captured
+ }
+#endif
+ graph_evaluated_or_captured = true; // CUDA graph has been captured
+ } else {
+ graph_evaluated_or_captured = true; // ggml graph has been directly evaluated
+ }
+ }
+
+ if (use_cuda_graph) {
+ if (cuda_ctx->cuda_graph->instance == nullptr) { // Create executable graph from captured graph.
+ CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0));
+ }
+
+ // Perform update to graph (if required for this token), and change copy parameter (required for every token)
+
+ if (cuda_graph_update_required) {
+ // Extract nodes from graph
+ if (cuda_ctx->cuda_graph->num_nodes == 0) {
+ // First call with null argument gets number of nodes in graph
+ CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, nullptr, &cuda_ctx->cuda_graph->num_nodes));
+ }
+ // Subsequent call with non-null argument gets nodes
+ cuda_ctx->cuda_graph->nodes.resize(cuda_ctx->cuda_graph->num_nodes);
+ cuda_ctx->cuda_graph->params.resize(cuda_ctx->cuda_graph->num_nodes);
+ if (cuda_ctx->cuda_graph->num_nodes > 0) {
+ CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, cuda_ctx->cuda_graph->nodes.data(), &cuda_ctx->cuda_graph->num_nodes));
+
+ // Loop over nodes, and extract kernel parameters from each node
+ for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) {
+ cudaGraphNodeType node_type;
+ CUDA_CHECK(cudaGraphNodeGetType(cuda_ctx->cuda_graph->nodes[i], &node_type));
+ if (node_type == cudaGraphNodeTypeKernel) {
+ cudaError_t stat = cudaGraphKernelNodeGetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i]); // Get params using runtime
+ if (stat == cudaErrorInvalidDeviceFunction) {
+ // Fails due to incorrect handling by CUDA runtime of CUDA BLAS node.
+ // We don't need to update blas nodes, so clear error and move on.
+ cudaGetLastError();
+ } else {
+ GGML_ASSERT(stat == cudaSuccess);
+ }
+ }
+ }
+ }
+ }
+
+ // One of the arguments to the copy kernel is updated for each token, hence we need to
+ // replace that argument with the updated value in the CUDA graph
+ if (!cuda_graph_update_required) { // on update steps, the live parameters will already be captured
+ int k = 0;
+ for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) {
+ if (cuda_ctx->cuda_graph->params[i].func == ggml_cuda_cpy_fn_ptr) {
+ char ** updated_kernel_arg_ptr = cuda_ctx->cuda_graph->updated_kernel_arg.at(k++);
+ cuda_ctx->cuda_graph->params[i].kernelParams[1] = updated_kernel_arg_ptr;
+ CUDA_CHECK(cudaGraphKernelNodeSetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i]));
+ }
+ }
+ }
+
+ // Update graph executable
+ cudaGraphExecUpdateResultInfo result_info;
+ cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &result_info);
+ if (stat == cudaErrorGraphExecUpdateFailure) {
+#ifndef NDEBUG
+ fprintf(stderr, "%s: CUDA graph update failed\n", __func__);
+#endif
+ // The pre-existing graph exec cannot be updated due to violated constraints
+ // so instead clear error and re-instantiate
+ cudaGetLastError();
+ CUDA_CHECK(cudaGraphExecDestroy(cuda_ctx->cuda_graph->instance));
+ cuda_ctx->cuda_graph->instance = nullptr;
+ CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0));
+ } else {
+ GGML_ASSERT(stat == cudaSuccess);
+ }
+ // Launch graph
+ CUDA_CHECK(cudaGraphLaunch(cuda_ctx->cuda_graph->instance, cuda_ctx->stream()));
+#else
+ graph_evaluated_or_captured = true;
+#endif // USE_CUDA_GRAPH
}
return GGML_STATUS_SUCCESS;
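
The compute path above follows the standard CUDA graph lifecycle: capture the stream work once, instantiate an executable graph, then on later tokens either launch it directly or update it in place (the node-property comparison earlier decides whether a re-capture is needed at all). A minimal standalone sketch of that lifecycle, with the same exec-update-then-reinstantiate fallback; the kernel and sizes are invented and most error checking is trimmed:

```cuda
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float * x, float s, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        x[i] *= s;
    }
}

int main() {
    const int n = 1 << 20;
    float * x;
    cudaMalloc(&x, n * sizeof(float));
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    cudaGraph_t     graph    = nullptr;
    cudaGraphExec_t instance = nullptr;

    for (int token = 0; token < 3; ++token) {
        if (graph == nullptr) {
            // Record the stream work into a graph instead of running it eagerly.
            cudaStreamBeginCapture(stream, cudaStreamCaptureModeRelaxed);
            scale_kernel<<<(n + 255) / 256, 256, 0, stream>>>(x, 0.5f, n);
            cudaStreamEndCapture(stream, &graph);
        }
        if (instance == nullptr) {
            cudaGraphInstantiate(&instance, graph, nullptr, nullptr, 0);
        } else {
            // Try to update the executable graph in place; if the update's
            // constraints are violated, clear the error and re-instantiate,
            // mirroring the cudaGraphExecUpdate fallback above.
            cudaGraphExecUpdateResultInfo result_info;
            if (cudaGraphExecUpdate(instance, graph, &result_info) != cudaSuccess) {
                cudaGetLastError();
                cudaGraphExecDestroy(instance);
                cudaGraphInstantiate(&instance, graph, nullptr, nullptr, 0);
            }
        }
        cudaGraphLaunch(instance, stream); // replays the captured work
    }
    cudaStreamSynchronize(stream);

    cudaGraphExecDestroy(instance);
    cudaGraphDestroy(graph);
    cudaStreamDestroy(stream);
    cudaFree(x);
}
```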
diff --git a/ggml-cuda/clamp.cu b/ggml-cuda/clamp.cu
index 379ded042..8009a3e3d 100644
--- a/ggml-cuda/clamp.cu
+++ b/ggml-cuda/clamp.cu
@@ -31,5 +31,4 @@ void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
clamp_f32_cuda(src0_d, dst_d, min, max, ggml_nelements(src0), stream);
- CUDA_CHECK(cudaGetLastError());
}
diff --git a/ggml-cuda/common.cuh b/ggml-cuda/common.cuh
index 156eba6d1..44e67e040 100644
--- a/ggml-cuda/common.cuh
+++ b/ggml-cuda/common.cuh
@@ -19,6 +19,7 @@
#include
#include
#include
+#include <vector>
#if defined(GGML_USE_HIPBLAS)
#include
@@ -137,7 +138,8 @@
#define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)
#define WARP_SIZE 32
-#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMASK 12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons
#define CC_PASCAL 600
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
@@ -232,88 +234,6 @@ typedef float dfloat; // dequantize float
typedef float2 dfloat2;
#endif //GGML_CUDA_F16
-[[noreturn]]
-static __device__ void no_device_code(
- const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) {
-
-#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n",
- file_name, line, function_name, arch);
- GGML_UNUSED(arch_list);
-#else
- printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n",
- file_name, line, function_name, arch, arch_list);
-#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- __trap();
-
- GGML_UNUSED(no_device_code); // suppress unused function warning
-}
-
-#ifdef __CUDA_ARCH__
-#define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__))
-#else
-#define NO_DEVICE_CODE //GGML_ASSERT(false && "NO_DEVICE_CODE not valid in host code.")
-#endif // __CUDA_ARCH__
-
-static __device__ __forceinline__ float warp_reduce_sum(float x) {
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- x += __shfl_xor_sync(0xffffffff, x, mask, 32);
- }
- return x;
-}
-
-static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) {
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32);
- a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32);
- }
- return a;
-}
-
-static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32));
- }
- return a;
-#else
- GGML_UNUSED(a);
- NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
-}
-
-static __device__ __forceinline__ float warp_reduce_max(float x) {
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
- }
- return x;
-}
-
-static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
- }
- return x;
-#else
- GGML_UNUSED(x);
- NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
-}
-
-#if CUDART_VERSION < 12000
-static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) {
- const uint32_t mask_low = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b)));
- const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b)));
- return mask_low | mask_high;
-}
-#endif // CUDART_VERSION < 12000
-
#if defined(GGML_USE_HIPBLAS)
#define __CUDA_ARCH__ 1300
@@ -397,11 +317,143 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
}
#endif // defined(GGML_USE_HIPBLAS)
-#define FP16_AVAILABLE defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) ? \
- defined(RDNA1) || defined(RDNA2) || defined(RDNA3) : __CUDA_ARCH__ >= CC_PASCAL
+#define FP16_AVAILABLE (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= CC_PASCAL
#define FP16_MMA_AVAILABLE !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA
+static bool fp16_mma_available(const int cc) {
+ return cc < CC_OFFSET_AMD && cc >= CC_VOLTA;
+}
+
+[[noreturn]]
+static __device__ void no_device_code(
+ const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) {
+
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n",
+ file_name, line, function_name, arch);
+ GGML_UNUSED(arch_list);
+#else
+ printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n",
+ file_name, line, function_name, arch, arch_list);
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ __trap();
+
+ GGML_UNUSED(no_device_code); // suppress unused function warning
+}
+
+#ifdef __CUDA_ARCH__
+#define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__))
+#else
+#define NO_DEVICE_CODE //GGML_ASSERT(false && "NO_DEVICE_CODE not valid in host code.")
+#endif // __CUDA_ARCH__
+
+static __device__ __forceinline__ float warp_reduce_sum(float x) {
+#pragma unroll
+ for (int mask = 16; mask > 0; mask >>= 1) {
+ x += __shfl_xor_sync(0xffffffff, x, mask, 32);
+ }
+ return x;
+}
+
+static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) {
+#pragma unroll
+ for (int mask = 16; mask > 0; mask >>= 1) {
+ a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32);
+ a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32);
+ }
+ return a;
+}
+
+static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
+#if FP16_AVAILABLE
+
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#pragma unroll
+ for (int mask = 16; mask > 0; mask >>= 1) {
+ const half2 a_other = __shfl_xor_sync(0xffffffff, a, mask, 32);
+ reinterpret_cast<half&>(a.x) += __low2half(a_other);
+ reinterpret_cast<half&>(a.y) += __high2half(a_other);
+ }
+ return a;
+#else
+#pragma unroll
+ for (int mask = 16; mask > 0; mask >>= 1) {
+ a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32));
+ }
+ return a;
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+
+#else
+ NO_DEVICE_CODE;
+ return a;
+#endif // FP16_AVAILABLE
+}
+
+static __device__ __forceinline__ float warp_reduce_max(float x) {
+#pragma unroll
+ for (int mask = 16; mask > 0; mask >>= 1) {
+ x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+ }
+ return x;
+}
+
+static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) {
+#if FP16_AVAILABLE
+
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+ return __float2half(fmaxf(__half2float(a), __half2float(b)));
+#else
+ return __hmax(a, b);
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+
+#else
+ NO_DEVICE_CODE;
+ GGML_UNUSED(b);
+ return a;
+#endif // FP16_AVAILABLE
+}
+
+static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+ return __hmax2(a, b);
+#else
+ half2 ret;
+ reinterpret_cast<half&>(ret.x) = __float2half(fmaxf( __low2float(a), __low2float(b)));
+ reinterpret_cast<half&>(ret.y) = __float2half(fmaxf(__high2float(a), __high2float(b)));
+ return ret;
+#endif // CUDART_VERSION >= CUDART_HMAX
+
+#else
+ GGML_UNUSED(a);
+ GGML_UNUSED(b);
+ NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+}
+
+static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
+#pragma unroll
+ for (int mask = 16; mask > 0; mask >>= 1) {
+ x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+ }
+ return x;
+#else
+ GGML_UNUSED(x);
+ NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
+}
+
+#if CUDART_VERSION < CUDART_HMASK
+static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) {
+ const uint32_t mask_low = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b)));
+ const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b)));
+ return mask_low | mask_high;
+}
+#endif // CUDART_VERSION < CUDART_HMASK
+
// TODO: move to ggml-common.h
static const __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
@@ -491,6 +543,43 @@ struct ggml_tensor_extra_gpu {
cudaEvent_t events[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS]; // events for synchronizing multiple GPUs
};
+
+#if (CUDART_VERSION >= 12000) && defined(GGML_CUDA_USE_GRAPHS)
+#define USE_CUDA_GRAPH
+#endif
+
+struct ggml_graph_node_properties {
+ void * node_address;
+ ggml_op node_op;
+ int64_t ne[GGML_MAX_DIMS];
+ size_t nb[GGML_MAX_DIMS];
+ void * src_address[GGML_MAX_SRC];
+};
+
+struct ggml_cuda_graph {
+#ifdef USE_CUDA_GRAPH
+ ~ggml_cuda_graph() {
+ if (instance != nullptr) {
+ CUDA_CHECK(cudaGraphExecDestroy(instance));
+ }
+ if (graph != nullptr) {
+ CUDA_CHECK(cudaGraphDestroy(graph));
+ }
+ }
+ cudaGraph_t graph = nullptr;
+ cudaGraphExec_t instance = nullptr;
+ size_t num_nodes = 0;
+ std::vector<cudaGraphNode_t> nodes;
+ std::vector<cudaKernelNodeParams> params;
+ bool disable_due_to_gpu_arch = false;
+ bool disable_due_to_too_many_updates = false;
+ bool disable_due_to_failed_graph_capture = false;
+ int number_consecutive_updates = 0;
+ std::vector<ggml_graph_node_properties> ggml_graph_properties;
+ std::vector<char **> updated_kernel_arg;
+#endif
+};
+
struct ggml_backend_cuda_context {
int device;
std::string name;
@@ -499,6 +588,8 @@ struct ggml_backend_cuda_context {
cudaStream_t streams[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { { nullptr } };
cublasHandle_t cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
+ std::unique_ptr<ggml_cuda_graph> cuda_graph;
+
explicit ggml_backend_cuda_context(int device) :
device(device),
name(GGML_CUDA_NAME + std::to_string(device)) {
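
The `warp_reduce_sum`/`warp_reduce_max` helpers above use the XOR-shuffle butterfly: with masks 16, 8, 4, 2, 1, each lane folds in the partial result of the lane `mask` positions away, so after five steps every lane in the warp holds the full reduction. A minimal kernel demonstrating the same pattern (CUDA-only, no HIP path; the demo names are illustrative):

```cuda
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_sum_demo(const float * x, float * out) {
    float v = x[threadIdx.x]; // exactly one warp: threadIdx.x in [0, 32)
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        v += __shfl_xor_sync(0xffffffff, v, mask, 32);
    }
    if (threadIdx.x == 0) {
        *out = v; // every lane holds the sum; lane 0 writes it out
    }
}

int main() {
    float h_x[32], h_out = 0.0f;
    for (int i = 0; i < 32; ++i) {
        h_x[i] = (float) i; // expected sum: 0 + 1 + ... + 31 = 496
    }
    float * d_x, * d_out;
    cudaMalloc(&d_x, sizeof(h_x));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_x, h_x, sizeof(h_x), cudaMemcpyHostToDevice);
    warp_sum_demo<<<1, 32>>>(d_x, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("warp sum = %.1f\n", h_out); // 496.0
    cudaFree(d_x);
    cudaFree(d_out);
}
```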
diff --git a/ggml-cuda/convert.cu b/ggml-cuda/convert.cu
index 75e50c985..830e2d756 100644
--- a/ggml-cuda/convert.cu
+++ b/ggml-cuda/convert.cu
@@ -727,7 +727,6 @@ static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict_
}
to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
- int id;
switch (type) {
case GGML_TYPE_Q4_0:
return dequantize_row_q4_0_cuda;
@@ -738,8 +737,7 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
case GGML_TYPE_Q5_1:
return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
case GGML_TYPE_Q8_0:
- CUDA_CHECK(cudaGetDevice(&id));
- if (ggml_cuda_info().devices[id].cc >= CC_PASCAL) {
+ if (ggml_cuda_info().devices[ggml_cuda_get_device()].cc >= CC_PASCAL) {
return dequantize_block_q8_0_f16_cuda;
}
return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
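
`ggml_get_to_fp16_cuda` now picks the Q8_0 kernel based on the device's compute capability. A rough sketch of that dispatch style using only the CUDA runtime (ggml caches the per-device `cc` at init; this sketch queries it directly, and the function names are invented):

```cpp
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

typedef void (*to_fp16_fn)(const void * x, void * y, int64_t k, cudaStream_t stream);

static void dequant_generic (const void *, void *, int64_t, cudaStream_t) { /* pre-Pascal path */ }
static void dequant_fast_f16(const void *, void *, int64_t, cudaStream_t) { /* Pascal+ path */ }

static to_fp16_fn pick_dequant_kernel() {
    int id = 0;
    cudaGetDevice(&id);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, id);
    const int cc = 100 * prop.major + 10 * prop.minor; // 600 corresponds to CC_PASCAL
    return cc >= 600 ? dequant_fast_f16 : dequant_generic;
}

int main() {
    const to_fp16_fn fn = pick_dequant_kernel();
    printf("picked the %s path\n", fn == dequant_fast_f16 ? "fast f16" : "generic");
}
```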
diff --git a/ggml-cuda/cpy.cu b/ggml-cuda/cpy.cu
index 16d9c8fff..12d741f01 100644
--- a/ggml-cuda/cpy.cu
+++ b/ggml-cuda/cpy.cu
@@ -459,3 +459,32 @@ void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];
ggml_cuda_cpy(ctx, src0, dst);
}
+
+void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
+ return (void*) cpy_f32_f16<cpy_1_f32_f32>;
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
+ return (void*) cpy_f32_f16<cpy_1_f32_f16>;
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) {
+ return (void*) cpy_f32_q<cpy_blck_f32_q8_0, QK8_0>;
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) {
+ return (void*) cpy_f32_q<cpy_blck_f32_q4_0, QK4_0>;
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) {
+ return (void*) cpy_f32_q<cpy_blck_f32_q4_1, QK4_1>;
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) {
+ return (void*) cpy_f32_q<cpy_blck_f32_q5_0, QK5_0>;
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) {
+ return (void*) cpy_f32_q<cpy_blck_f32_iq4_nl, QK4_NL>;
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) {
+ return (void*) cpy_f32_q<cpy_blck_f32_q5_1, QK5_1>;
+ } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) {
+ return (void*) cpy_f32_f16<cpy_1_f16_f16>;
+ } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) {
+ return (void*) cpy_f32_f16<cpy_1_f16_f32>;
+ } else {
+ fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__,
+ ggml_type_name(src0->type), ggml_type_name(src1->type));
+ GGML_ASSERT(false);
+ }
+}
+
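
`ggml_cuda_cpy_fn` exists so the graph code can recognize the copy kernel's nodes later: a `__global__` function's host-side address compares equal to `cudaKernelNodeParams::func` for the nodes that launch it. A minimal sketch of that identification step (illustrative kernel, error handling omitted):

```cuda
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void copy_kernel(const char * src, char * dst) {
    dst[threadIdx.x] = src[threadIdx.x];
}

int main() {
    char * src, * dst;
    cudaMalloc(&src, 32);
    cudaMalloc(&dst, 32);
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Capture a single kernel launch into a graph.
    cudaGraph_t graph;
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeRelaxed);
    copy_kernel<<<1, 32, 0, stream>>>(src, dst);
    cudaStreamEndCapture(stream, &graph);

    size_t num_nodes = 0;
    cudaGraphGetNodes(graph, nullptr, &num_nodes); // first call: count only
    std::vector<cudaGraphNode_t> nodes(num_nodes);
    cudaGraphGetNodes(graph, nodes.data(), &num_nodes);

    for (size_t i = 0; i < num_nodes; i++) {
        cudaGraphNodeType node_type;
        cudaGraphNodeGetType(nodes[i], &node_type);
        if (node_type != cudaGraphNodeTypeKernel) {
            continue;
        }
        cudaKernelNodeParams params;
        cudaGraphKernelNodeGetParams(nodes[i], &params);
        if (params.func == (void *) copy_kernel) {
            // params.kernelParams[1] points at the stored 'dst' argument; it
            // could be redirected here before cudaGraphKernelNodeSetParams().
            printf("found the copy kernel at node %zu\n", i);
        }
    }

    cudaGraphDestroy(graph);
    cudaStreamDestroy(stream);
    cudaFree(src);
    cudaFree(dst);
}
```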
diff --git a/ggml-cuda/cpy.cuh b/ggml-cuda/cpy.cuh
index f0b2c453b..796167426 100644
--- a/ggml-cuda/cpy.cuh
+++ b/ggml-cuda/cpy.cuh
@@ -5,3 +5,5 @@
void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1);
void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1);
diff --git a/ggml-cuda/fattn.cu b/ggml-cuda/fattn.cu
index df1e80068..7c486f482 100644
--- a/ggml-cuda/fattn.cu
+++ b/ggml-cuda/fattn.cu
@@ -11,8 +11,10 @@
#define HALF_MAX_HALF __float2half(65504.0f/2) // Use neg. of this instead of -INFINITY to initialize KQ max vals to avoid NaN upon subtraction.
#define SOFTMAX_FTZ_THRESHOLD -20.0f // Softmax exp. of values smaller than this are flushed to zero to avoid NaNs.
-template<int D, int parallel_blocks> // D == head size
-__launch_bounds__(((D + WARP_SIZE - 1) / WARP_SIZE)*WARP_SIZE, 1)
+template<int D, int ncols, int parallel_blocks> // D == head size
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+__launch_bounds__(D, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
static __global__ void flash_attn_vec_ext_f16(
const char * __restrict__ Q,
const char * __restrict__ K,
@@ -44,55 +46,77 @@ static __global__ void flash_attn_vec_ext_f16(
#if FP16_AVAILABLE
//In this kernel Q, K, V are matrices while i, j, k are matrix indices.
- const int ic = blockIdx.x / parallel_blocks; // Index of the Q/QKV column to work on.
- const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
+ const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on.
+ const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
- const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic);
+ const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0);
const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio));
const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
- const half * maskh = (const half *) mask + ne11*ic;
+ const half * maskh = (const half *) mask + ne11*ic0;
const int stride_KV = nb11 / sizeof(half);
const int stride_KV2 = nb11 / sizeof(half2);
- constexpr int nwarps = (D + WARP_SIZE - 1) / WARP_SIZE;
+ static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
+ constexpr int nwarps = D / WARP_SIZE;
const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
- __builtin_assume(tid < nwarps*WARP_SIZE);
+ __builtin_assume(tid < D);
- __shared__ half KQ[nwarps*WARP_SIZE];
- KQ[tid] = -INFINITY;
+ __shared__ half KQ[ncols*D];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ KQ[j*D + tid] = -HALF_MAX_HALF;
+ }
half2 * KQ2 = (half2 *) KQ;
- half kqmax = -HALF_MAX_HALF;
- half kqsum = 0.0f;
+ half kqmax[ncols];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqmax[j] = -HALF_MAX_HALF;
+ }
+ half kqsum[ncols] = {0.0f};
- __shared__ half kqmax_shared[WARP_SIZE];
- __shared__ half kqsum_shared[WARP_SIZE];
- if (threadIdx.y == 0) {
- kqmax_shared[threadIdx.x] = -HALF_MAX_HALF;
- kqsum_shared[threadIdx.x] = 0.0f;
+ __shared__ half kqmax_shared[ncols][WARP_SIZE];
+ __shared__ half kqsum_shared[ncols][WARP_SIZE];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ if (threadIdx.y == 0) {
+ kqmax_shared[j][threadIdx.x] = -HALF_MAX_HALF;
+ kqsum_shared[j][threadIdx.x] = 0.0f;
+ }
}
__syncthreads();
// Convert Q to half2 and store in registers:
- half2 Q_h2[(D/2 + WARP_SIZE - 1) / WARP_SIZE];
+ half2 Q_h2[ncols][D/(2*WARP_SIZE)];
#pragma unroll
- for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
- const int i = i0 + threadIdx.x;
- if (i0 + WARP_SIZE > D/2 && i >= D/2) {
- break;
- }
+ for (int j = 0; j < ncols; ++j) {
+#pragma unroll
+ for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
- Q_h2[i0/WARP_SIZE] = make_half2(scale, scale) * make_half2(Q_f2[i].x, Q_f2[i].y);
+ const float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i];
+ Q_h2[j][i0/WARP_SIZE] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y);
+ }
}
- half2 VKQ = make_half2(0.0f, 0.0f); // Each thread calculates a single VKQ value.
+ half2 VKQ[ncols] = {{0.0f, 0.0f}};
- const int k_start = parallel_blocks == 1 ? 0 : ip*D;
+ const int k_start = parallel_blocks == 1 ? 0 : ip*D;
for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) {
// Calculate KQ tile and keep track of new maximum KQ values:
- half kqmax_new = kqmax;
+
+ // For unknown reasons using a half array of size 1 for kqmax_new causes a performance regression,
+ // see https://github.com/ggerganov/llama.cpp/pull/7061 .
+ // Therefore this variable is defined twice but only used once (so that the compiler can optimize out the unused variable).
+ half kqmax_new = kqmax[0];
+ half kqmax_new_arr[ncols];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqmax_new_arr[j] = kqmax[j];
+ }
+
#pragma unroll
for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) {
const int i_KQ = i_KQ_0 + threadIdx.y;
@@ -101,89 +125,112 @@ static __global__ void flash_attn_vec_ext_f16(
break;
}
- half2 sum2 = make_half2(0.0f, 0.0f);
+ half2 sum2[ncols] = {{0.0f, 0.0f}};
#pragma unroll
for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) {
const int k_KQ = k_KQ_0 + threadIdx.x;
- if (k_KQ_0 + WARP_SIZE > D/2 && k_KQ >= D/2) {
- break;
- }
const half2 K_ik = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ];
- sum2 += K_ik * Q_h2[k_KQ_0/WARP_SIZE];
- }
-
- sum2 = warp_reduce_sum(sum2);
- half sum = __low2half(sum2) + __high2half(sum2);
- sum += mask ? maskh[k_VKQ_0 + i_KQ] : __float2half(0.0f);
- kqmax_new = __hmax(kqmax_new, sum);
- if (threadIdx.x == 0) {
- KQ[i_KQ] = sum;
- }
- }
-
- kqmax_new = warp_reduce_max(kqmax_new);
- if (threadIdx.x == 0) {
- kqmax_shared[threadIdx.y] = kqmax_new;
- }
- __syncthreads();
- kqmax_new = kqmax_shared[threadIdx.x];
- kqmax_new = warp_reduce_max(kqmax_new);
-
- const half KQ_max_scale = hexp(kqmax - kqmax_new);
- kqmax = kqmax_new;
-
- const half val = hexp(KQ[tid] - kqmax);
- kqsum = kqsum*KQ_max_scale + val;
- KQ[tid] = val;
-
- VKQ *= __half2half2(KQ_max_scale);
-
- __syncthreads();
-
- if (tid < D) {
#pragma unroll
- for (int k0 = 0; k0 < D; k0 += 2) {
- if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k0 >= ne11) {
- break;
+ for (int j = 0; j < ncols; ++j) {
+ sum2[j] += K_ik * Q_h2[j][k_KQ_0/WARP_SIZE];
+ }
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ sum2[j] = warp_reduce_sum(sum2[j]);
+ half sum = __low2half(sum2[j]) + __high2half(sum2[j]);
+ sum += mask ? maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f);
+
+ if (ncols == 1) {
+ kqmax_new = ggml_cuda_hmax(kqmax_new, sum);
+ } else {
+ kqmax_new_arr[j] = ggml_cuda_hmax(kqmax_new_arr[j], sum);
}
- half2 V_k;
- reinterpret_cast(V_k.x) = V_h[(k_VKQ_0 + k0 + 0)*stride_KV + tid];
- reinterpret_cast(V_k.y) = V_h[(k_VKQ_0 + k0 + 1)*stride_KV + tid];
- VKQ += V_k*KQ2[k0/2];
+ if (threadIdx.x == 0) {
+ KQ[j*D + i_KQ] = sum;
+ }
+ }
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ half kqmax_new_j = ncols == 1 ? kqmax_new : kqmax_new_arr[j];
+
+ kqmax_new_j = warp_reduce_max(kqmax_new_j);
+ if (threadIdx.x == 0) {
+ kqmax_shared[j][threadIdx.y] = kqmax_new_j;
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ half kqmax_new_j = kqmax_shared[j][threadIdx.x];
+ kqmax_new_j = warp_reduce_max(kqmax_new_j);
+
+ const half KQ_max_scale = hexp(kqmax[j] - kqmax_new_j);
+ kqmax[j] = kqmax_new_j;
+
+ const half val = hexp(KQ[j*D + tid] - kqmax[j]);
+ kqsum[j] = kqsum[j]*KQ_max_scale + val;
+ KQ[j*D + tid] = val;
+
+ VKQ[j] *= __half2half2(KQ_max_scale);
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int k0 = 0; k0 < D; k0 += 2) {
+ if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k0 >= ne11) {
+ break;
+ }
+
+ half2 V_k;
+ reinterpret_cast(V_k.x) = V_h[(k_VKQ_0 + k0 + 0)*stride_KV + tid];
+ reinterpret_cast(V_k.y) = V_h[(k_VKQ_0 + k0 + 1)*stride_KV + tid];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ VKQ[j] += V_k*KQ2[j*(D/2) + k0/2];
}
}
__syncthreads();
}
- if (tid >= D) {
- kqsum = 0.0f;
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqsum[j] = warp_reduce_sum(kqsum[j]);
+ if (threadIdx.x == 0) {
+ kqsum_shared[j][threadIdx.y] = kqsum[j];
+ }
}
- kqsum = warp_reduce_sum(kqsum);
- if (threadIdx.x == 0) {
- kqsum_shared[threadIdx.y] = kqsum;
- }
__syncthreads();
- kqsum = kqsum_shared[threadIdx.x];
- kqsum = warp_reduce_sum(kqsum);
- if (tid >= D) {
- return;
+#pragma unroll
+ for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) {
+ kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x];
+ kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]);
+
+ half dst_val = (__low2half(VKQ[j_VKQ]) + __high2half(VKQ[j_VKQ]));
+ if (parallel_blocks == 1) {
+ dst_val /= kqsum[j_VKQ];
+ }
+ const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
+ dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val;
}
- half dst_val = (__low2half(VKQ) + __high2half(VKQ));
- if (parallel_blocks == 1) {
- dst_val /= kqsum;
+ if (parallel_blocks != 1 && tid != 0) {
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ dst_meta[(ic0 + j)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j], kqsum[j]);
+ }
}
- dst[D*gridDim.y*blockIdx.x + D*blockIdx.y + tid] = dst_val;
-
- if (parallel_blocks == 1 || tid != 0) {
- return;
- }
- dst_meta[ic*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax, kqsum);
#else
NO_DEVICE_CODE;
#endif // FP16_AVAILABLE
@@ -191,7 +238,9 @@ static __global__ void flash_attn_vec_ext_f16(
// D == head size, VKQ_stride == num VKQ rows calculated in parallel:
template
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
__launch_bounds__(nwarps*WARP_SIZE, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
static __global__ void flash_attn_ext_f16(
const char * __restrict__ Q,
const char * __restrict__ K,
@@ -416,9 +465,9 @@ static __global__ void flash_attn_ext_f16(
const int k = k0 + threadIdx.x;
KQ2_tmp[k0/WARP_SIZE] += mask ? mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
- KQ_max_new = __hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
+ KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
}
- KQ_max_new = __half2half2(warp_reduce_max(__hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
+ KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new;
KQ_max_scale_h2[j0/nwarps] = h2exp(diff);
const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
@@ -573,7 +622,9 @@ static __global__ void flash_attn_ext_f16(
}
template // D == head size
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
__launch_bounds__(D, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
static __global__ void flash_attn_combine_results(
const float * __restrict__ VKQ_parts,
const float2 * __restrict__ VKQ_meta,
@@ -642,7 +693,7 @@ static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed.");
static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed.");
static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed.");
-template <int D, int parallel_blocks> void launch_fattn_vec_f16(
+template <int D, int cols_per_block, int parallel_blocks> void launch_fattn_vec_f16(
const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, ggml_tensor * KQV, const ggml_tensor * mask,
ggml_cuda_pool & pool, cudaStream_t main_stream
) {
@@ -656,13 +707,13 @@ template void launch_fattn_vec_f16(
constexpr int nwarps = (D + WARP_SIZE - 1) / WARP_SIZE;
const dim3 block_dim(WARP_SIZE, nwarps, 1);
- const dim3 blocks_num(parallel_blocks*Q->ne[1], Q->ne[2], Q->ne[3]);
+ const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]);
const int shmem = 0;
float scale;
memcpy(&scale, KQV->op_params, sizeof(float));
- flash_attn_vec_ext_f16<D, parallel_blocks>
+ flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>
<<<blocks_num, block_dim, shmem, main_stream>>> (
(const char *) Q->data,
(const char *) K->data,
@@ -783,10 +834,99 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
ggml_cuda_set_device(ctx.device);
+ const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm;
const int32_t precision = KQV->op_params[1];
+ if (!fp16_mma_available(cc)) {
+ GGML_ASSERT(precision == GGML_PREC_DEFAULT);
+ GGML_ASSERT((Q->ne[0] == 64 || Q->ne[0] == 128) && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
+
+ if (Q->ne[1] == 1) {
+ constexpr int cols_per_block = 1;
+ constexpr int parallel_blocks = 4;
+ switch (Q->ne[0]) {
+ case 64:
+ launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ case 128:
+ launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+ return;
+ }
+
+ if (Q->ne[1] == 2) {
+ constexpr int cols_per_block = 2;
+ constexpr int parallel_blocks = 4;
+ switch (Q->ne[0]) {
+ case 64:
+ launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ case 128:
+ launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+ return;
+ }
+
+ if (Q->ne[1] <= 4) {
+ constexpr int cols_per_block = 4;
+ constexpr int parallel_blocks = 4;
+ switch (Q->ne[0]) {
+ case 64:
+ launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ case 128:
+ launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+ return;
+ }
+
+ if (Q->ne[1] <= 8) {
+ constexpr int cols_per_block = 8;
+ constexpr int parallel_blocks = 4;
+ switch (Q->ne[0]) {
+ case 64:
+ launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ case 128:
+ launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+ return;
+ }
+
+ constexpr int cols_per_block = 8;
+ constexpr int parallel_blocks = 1;
+ switch (Q->ne[0]) {
+ case 64:
+ launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ case 128:
+ launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+ return;
+ }
+
if (precision != GGML_PREC_DEFAULT) {
if (Q->ne[1] <= 32 || Q->ne[0] > 128) {
constexpr int cols_per_block = 16;
@@ -845,16 +985,17 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
}
if (Q->ne[1] == 1 && Q->ne[0] % (2*WARP_SIZE) == 0) {
+ constexpr int cols_per_block = 1;
constexpr int parallel_blocks = 4;
switch (Q->ne[0]) {
case 64:
- launch_fattn_vec_f16< 64, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
break;
case 128:
- launch_fattn_vec_f16<128, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
break;
case 256:
- launch_fattn_vec_f16<256, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
+ launch_fattn_vec_f16<256, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
break;
default:
GGML_ASSERT(false);
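
The `kqmax`/`kqsum`/`KQ_max_scale` bookkeeping in the kernels above is the online-softmax trick: keep a running maximum and a sum of exponentials relative to it, and whenever the maximum grows, rescale the old sum (and the value accumulator) by `exp(m_old - m_new)` so the softmax never needs a second pass over the scores. A scalar sketch of the same update rule, with all values fixed to 1.0 so the weighted result should come out to exactly 1:

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<float> scores = {1.0f, 3.0f, 2.0f, 5.0f, 4.0f};

    float m   = -INFINITY; // running max            (kqmax)
    float s   = 0.0f;      // running exp-sum        (kqsum)
    float acc = 0.0f;      // softmax-weighted value (VKQ), all values are 1 here

    for (const float x : scores) {
        const float m_new = fmaxf(m, x);
        const float scale = expf(m - m_new); // KQ_max_scale: rescales old terms
        const float w     = expf(x - m_new);
        s   = s   * scale + w;
        acc = acc * scale + w * 1.0f;
        m   = m_new;
    }
    printf("softmax-weighted sum = %f (expected 1.0)\n", acc / s);
}
```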
diff --git a/ggml-cuda/mmq.cu b/ggml-cuda/mmq.cu
index 60d6616a8..7948f1b12 100644
--- a/ggml-cuda/mmq.cu
+++ b/ggml-cuda/mmq.cu
@@ -1735,8 +1735,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -1780,8 +1779,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -1825,8 +1823,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -1870,8 +1867,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -1915,8 +1911,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -1960,8 +1955,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -2007,8 +2001,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda(
#if QK_K == 256
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -2053,8 +2046,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -2098,8 +2090,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
@@ -2143,8 +2134,7 @@ static void ggml_mul_mat_q6_K_q8_1_cuda(
const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
const int compute_capability = ggml_cuda_info().devices[id].cc;
int mmq_x, mmq_y, nwarps;
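Every hunk above collapses the same two-line device query into one call; the patch does not show the helper itself, but ggml_cuda_get_device() is presumably defined once in ggml-cuda/common.cuh along these lines (a sketch, not part of this diff):

static int ggml_cuda_get_device() {
    int id;
    CUDA_CHECK(cudaGetDevice(&id));  // query the current CUDA device ordinal
    return id;
}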
diff --git a/ggml-cuda/mmvq.cu b/ggml-cuda/mmvq.cu
index 396559001..65cc1bcaa 100644
--- a/ggml-cuda/mmvq.cu
+++ b/ggml-cuda/mmvq.cu
@@ -89,8 +89,7 @@ static void mul_mat_vec_q_cuda(
GGML_ASSERT(ncols_x % qk == 0);
GGML_ASSERT(ncols_y <= MMVQ_MAX_BATCH_SIZE);
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
int64_t nwarps = 1;
int64_t rows_per_cuda_block = 1;
@@ -328,8 +327,7 @@ void ggml_cuda_op_mul_mat_vec_q(
const int64_t ne0 = dst->ne[0];
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ int id = ggml_cuda_get_device();
// the main device has a larger memory buffer to hold the results from all GPUs
// nrows_dst == nrows of the matrix that the kernel writes into
diff --git a/ggml-cuda/scale.cu b/ggml-cuda/scale.cu
index 6e3617d1c..1405e066e 100644
--- a/ggml-cuda/scale.cu
+++ b/ggml-cuda/scale.cu
@@ -28,5 +28,4 @@ void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
memcpy(&scale, dst->op_params, sizeof(float));
scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream);
- CUDA_CHECK(cudaGetLastError());
}
diff --git a/ggml-impl.h b/ggml-impl.h
index 94a1cc668..d85b152bf 100644
--- a/ggml-impl.h
+++ b/ggml-impl.h
@@ -17,6 +17,83 @@
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
+/**
+ * Converts brain16 to float32.
+ *
+ * The bfloat16 floating point format has the following structure:
+ *
+ * ┌sign
+ * │
+ * │ ┌exponent
+ * │ │
+ * │ │ ┌mantissa
+ * │ │ │
+ * │┌──┴───┐┌─┴───┐
+ * 0b0000000000000000 brain16
+ *
+ * Since bf16 has the same number of exponent bits as a 32bit float,
+ * encoding and decoding numbers becomes relatively straightforward.
+ *
+ * ┌sign
+ * │
+ * │ ┌exponent
+ * │ │
+ * │ │ ┌mantissa
+ * │ │ │
+ * │┌──┴───┐┌─┴───────────────────┐
+ * 0b00000000000000000000000000000000 IEEE binary32
+ *
+ * For comparison, the standard fp16 format has fewer exponent bits.
+ *
+ * ┌sign
+ * │
+ * │ ┌exponent
+ * │ │
+ * │ │ ┌mantissa
+ * │ │ │
+ * │┌─┴─┐┌─┴──────┐
+ * 0b0000000000000000 IEEE binary16
+ *
+ * @see IEEE 754-2008
+ */
+static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
+ union {
+ float f;
+ uint32_t i;
+ } u;
+ u.i = (uint32_t)h.bits << 16;
+ return u.f;
+}
+
+/**
+ * Converts float32 to brain16.
+ *
+ * This function is binary identical to AMD Zen4 VCVTNEPS2BF16.
+ * Subnormals shall be flushed to zero, and NANs will be quiet.
+ * This code should vectorize nicely if using modern compilers.
+ */
+static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
+ ggml_bf16_t h;
+ union {
+ float f;
+ uint32_t i;
+ } u;
+ u.f = s;
+ if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
+ h.bits = (u.i >> 16) | 64; /* force to quiet */
+ return h;
+ }
+ if (!(u.i & 0x7f800000)) { /* subnormal */
+ h.bits = (u.i & 0x80000000) >> 16; /* flush to zero */
+ return h;
+ }
+ h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
+ return h;
+}
+
+#define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
+#define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
+
#ifdef __cplusplus
extern "C" {
#endif
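A minimal sketch exercising the converters above, assuming ggml-impl.h is on the include path; 1 + 2^-8 sits exactly halfway between the two nearest bf16 values, so the round-to-nearest-even encode ties down to 1.0:

#include <stdio.h>
#include "ggml-impl.h"

int main(void) {
    float x = 1.00390625f;                         // 1 + 2^-8, a halfway case
    ggml_bf16_t b = ggml_compute_fp32_to_bf16(x);  // ties to even: 0x3f80
    float y = ggml_compute_bf16_to_fp32(b);        // decode is just a left shift
    printf("%.9f -> 0x%04x -> %.9f\n", x, (unsigned) b.bits, y);  // -> 1.000000000
    return 0;
}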
diff --git a/ggml-metal.m b/ggml-metal.m
index 017b72ce9..c6817f01f 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -265,11 +265,20 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
static void * ggml_metal_host_malloc(size_t n) {
void * data = NULL;
+
+#if TARGET_OS_OSX
+ kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE);
+ if (err != KERN_SUCCESS) {
+ GGML_METAL_LOG_ERROR("%s: error: vm_allocate failed\n", __func__);
+ return NULL;
+ }
+#else
const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
if (result != 0) {
GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
return NULL;
}
+#endif
return data;
}
@@ -803,7 +812,7 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
case GGML_OP_DIAG_MASK_INF:
case GGML_OP_GET_ROWS:
{
- return op->ne[3] == 1;
+ return op->src[0]->type != GGML_TYPE_BF16 && op->ne[3] == 1;
}
default:
return false;
@@ -2840,7 +2849,11 @@ GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_
ggml_backend_metal_free_device();
if (ctx->owned) {
+#if TARGET_OS_OSX
+ vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)ctx->all_data, ctx->all_size);
+#else
free(ctx->all_data);
+#endif
}
free(ctx);
@@ -2944,14 +2957,16 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buff
ctx->owned = true;
ctx->n_buffers = 1;
- ctx->buffers[0].data = ctx->all_data;
- ctx->buffers[0].size = size;
- ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
- length:size_aligned
- options:MTLResourceStorageModeShared
- deallocator:nil];
+ if (ctx->all_data != NULL) {
+ ctx->buffers[0].data = ctx->all_data;
+ ctx->buffers[0].size = size;
+ ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
+ length:size_aligned
+ options:MTLResourceStorageModeShared
+ deallocator:nil];
+ }
- if (ctx->buffers[0].metal == nil) {
+ if (ctx->all_data == NULL || ctx->buffers[0].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
free(ctx);
ggml_backend_metal_free_device();
diff --git a/ggml-metal.metal b/ggml-metal.metal
index 3d4276ae0..46c7d5039 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -2175,7 +2175,7 @@ kernel void kernel_flash_attn_ext_f16(
const short D4 = D/4;
const short D8 = D/8;
- const short Q8 = Q/8;
+ //const short Q8 = Q/8;
const short NW = N_SIMDWIDTH;
const short SH = (C + Q); // shared memory per simdgroup in (half)
diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index b3f8b7eaf..880a14958 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -2119,6 +2119,7 @@ static size_t ggml_backend_opencl_buffer_type_get_alignment(ggml_backend_buffer_
if (alignment == (cl_uint)-1) {
ggml_cl_init();
clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN, sizeof(cl_uint), &alignment, NULL);
+ alignment /= 8; // bits to bytes
}
return alignment;
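The division matters because CL_DEVICE_MEM_BASE_ADDR_ALIGN is specified in bits, not bytes, so a device reporting 1024 only requires 128-byte alignment; without the fix the allocator demands 8x too much. A sketch of the corrected query as a standalone helper (device_alignment_bytes is a hypothetical name):

#include <CL/cl.h>

static size_t device_alignment_bytes(cl_device_id device) {
    cl_uint align_bits = 0;
    clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN,
                    sizeof(align_bits), &align_bits, NULL);
    return align_bits / 8;  // e.g. 1024 bits -> 128 bytes
}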
diff --git a/ggml-quants.c b/ggml-quants.c
index 444d1e55e..9883b6f8c 100644
--- a/ggml-quants.c
+++ b/ggml-quants.c
@@ -12450,6 +12450,24 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
const size_t nb = nbytes/ggml_type_size(type);
switch (type) {
+ case GGML_TYPE_BF16:
+ {
+ int nans = 0;
+ int infs = 0;
+ const unsigned short * f = (const unsigned short *) data;
+ for (size_t i = 0; i < nb; ++i) {
+ nans += (f[i] & 0x7fff) > 0x7f80;
+ infs += (f[i] & 0x7fff) == 0x7f80;
+ }
+ if (nans) {
+ fprintf(stderr, "%s: found %d NaNs in row of %zu BF16 values\n", __func__, nans, nb);
+ return false;
+ }
+ if (infs) {
+ fprintf(stderr, "%s: found %d infinities in row of %zu BF16 values\n", __func__, infs, nb);
+ return false;
+ }
+ } break;
case GGML_TYPE_F16:
{
const ggml_fp16_t * f = (const ggml_fp16_t *) data;
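The masks above follow from the bf16 layout (1 sign, 8 exponent, 7 mantissa bits): clearing the sign bit leaves 0x7f80 for an all-ones exponent with zero mantissa (infinity), and anything greater has a nonzero mantissa (NaN). A small standalone check of that reasoning:

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint16_t pos_inf = 0x7f80, neg_inf = 0xff80, quiet_nan = 0x7fc0;
    assert((pos_inf   & 0x7fff) == 0x7f80);  // +inf
    assert((neg_inf   & 0x7fff) == 0x7f80);  // sign bit masked away
    assert((quiet_nan & 0x7fff) >  0x7f80);  // NaN: exponent all ones, mantissa != 0
    return 0;
}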
diff --git a/ggml.c b/ggml.c
index 74ecd5927..093d38d00 100644
--- a/ggml.c
+++ b/ggml.c
@@ -322,7 +322,7 @@ static ggml_fp16_t ggml_table_exp_f16[1 << 16];
// precomputed f32 table for f16 (256 KB) (ggml-impl.h)
float ggml_table_f32_f16[1 << 16];
-const char * ggml_status_to_string(enum ggml_status status) {
+GGML_CALL const char * ggml_status_to_string(enum ggml_status status) {
switch (status) {
case GGML_STATUS_ALLOC_FAILED: return "GGML status: error (failed to allocate memory)";
case GGML_STATUS_FAILED: return "GGML status: error (operation failed)";
@@ -333,16 +333,26 @@ const char * ggml_status_to_string(enum ggml_status status) {
return "GGML status: unknown";
}
-// note: do not use these inside ggml.c
-// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
+#define ggml_fp16_to_fp32 do_not_use__ggml_fp16_to_fp32__in_ggml
return GGML_FP16_TO_FP32(x);
}
ggml_fp16_t ggml_fp32_to_fp16(float x) {
+#define ggml_fp32_to_fp16 do_not_use__ggml_fp32_to_fp16__in_ggml
return GGML_FP32_TO_FP16(x);
}
+float ggml_bf16_to_fp32(ggml_bf16_t x) {
+#define ggml_bf16_to_fp32 do_not_use__ggml_bf16_to_fp32__in_ggml
+ return GGML_BF16_TO_FP32(x); // it just left shifts
+}
+
+ggml_bf16_t ggml_fp32_to_bf16(float x) {
+#define ggml_fp32_to_bf16 do_not_use__ggml_fp32_to_bf16__in_ggml
+ return GGML_FP32_TO_BF16(x);
+}
+
void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int64_t n) {
for (int64_t i = 0; i < n; i++) {
y[i] = GGML_FP16_TO_FP32(x[i]);
@@ -368,6 +378,49 @@ void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int64_t n) {
}
}
+void ggml_bf16_to_fp32_row(const ggml_bf16_t * x, float * y, int64_t n) {
+ int64_t i = 0;
+#if defined(__AVX512F__)
+ for (; i + 16 <= n; i += 16) {
+ _mm512_storeu_ps(y + i,
+ _mm512_castsi512_ps(
+ _mm512_slli_epi32(
+ _mm512_cvtepu16_epi32(
+ _mm256_loadu_si256(
+ (const __m256i *)(x + i))),
+ 16)));
+ }
+#elif defined(__AVX2__)
+ for (; i + 8 <= n; i += 8) {
+ _mm256_storeu_ps(y + i,
+ _mm256_castsi256_ps(
+ _mm256_slli_epi32(
+ _mm256_cvtepu16_epi32(
+ _mm_loadu_si128(
+ (const __m128i *)(x + i))),
+ 16)));
+ }
+#endif
+ for (; i < n; i++) {
+ y[i] = GGML_BF16_TO_FP32(x[i]);
+ }
+}
+
+void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) {
+ int64_t i = 0;
+#if defined(__AVX512BF16__)
+ for (; i + 32 <= n; i += 32) {
+ _mm512_storeu_ps(
+ (__m512 *)(y + i),
+ (__m512)_mm512_cvtne2ps_pbh(_mm512_loadu_ps(x + i + 16),
+ _mm512_loadu_ps(x + i)));
+ }
+#endif
+ for (; i < n; i++) {
+ y[i] = GGML_FP32_TO_BF16(x[i]);
+ }
+}
+
bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) {
return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0;
}
@@ -503,6 +556,7 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc);
+static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc);
static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
[GGML_TYPE_I8] = {
@@ -845,6 +899,18 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.type_size = sizeof(block_q8_K),
.is_quantized = true,
.from_float = quantize_row_q8_K,
+ },
+ [GGML_TYPE_BF16] = {
+ .type_name = "bf16",
+ .blck_size = 1,
+ .type_size = sizeof(ggml_bf16_t),
+ .is_quantized = false,
+ .to_float = (ggml_to_float_t) ggml_bf16_to_fp32_row,
+ .from_float = (ggml_from_float_t) ggml_fp32_to_bf16_row,
+ .from_float_reference = (ggml_from_float_t) ggml_fp32_to_bf16_row,
+ .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16,
+ .vec_dot_type = GGML_TYPE_BF16,
+ .nrows = 1,
}
};
@@ -1480,6 +1546,8 @@ inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) {
inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
+inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
+
inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
@@ -1498,7 +1566,7 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float *
UNUSED(by);
UNUSED(bs);
-#ifdef GGML_SIMD
+#if defined(GGML_SIMD)
float sumf = 0.0f;
const int np = (n & ~(GGML_F32_STEP - 1));
@@ -1534,6 +1602,70 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float *
*s = sumf;
}
+static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc) {
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+ int i = 0;
+ ggml_float sumf = 0;
+
+#if defined(__AVX512BF16__)
+ __m512 c1 = _mm512_setzero_ps();
+ __m512 c2 = _mm512_setzero_ps();
+ for (; i + 64 <= n; i += 64) {
+ c1 = _mm512_dpbf16_ps(c1, (__m512bh)_mm512_loadu_ps((const float *)(x + i)),
+ (__m512bh)_mm512_loadu_ps((const float *)(y + i)));
+ c2 = _mm512_dpbf16_ps(c2, (__m512bh)_mm512_loadu_ps((const float *)(x + i + 32)),
+ (__m512bh)_mm512_loadu_ps((const float *)(y + i + 32)));
+ }
+ sumf += (ggml_float)_mm512_reduce_add_ps(c1);
+ sumf += (ggml_float)_mm512_reduce_add_ps(c2);
+
+#elif defined(__AVX512F__)
+#define LOAD(p) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)(p))), 16))
+ __m512 c1 = _mm512_setzero_ps();
+ __m512 c2 = _mm512_setzero_ps();
+ for (; i + 32 <= n; i += 32) {
+ c1 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
+ c2 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c2);
+ }
+ sumf += (ggml_float)_mm512_reduce_add_ps(c1);
+ sumf += (ggml_float)_mm512_reduce_add_ps(c2);
+
+#undef LOAD
+#elif defined(__AVX2__)
+#define LOAD(p) _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16))
+ __m256 c1 = _mm256_setzero_ps();
+ __m256 c2 = _mm256_setzero_ps();
+ __m256 c3 = _mm256_setzero_ps();
+ __m256 c4 = _mm256_setzero_ps();
+ for (; i + 32 <= n; i += 32) {
+ c1 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
+ c2 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 8), LOAD(y + i + 8)), c2);
+ c3 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c3);
+ c4 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 24), LOAD(y + i + 24)), c4);
+ }
+ __m128 g;
+ c1 = _mm256_add_ps(_mm256_add_ps(c1, c3),
+ _mm256_add_ps(c2, c4));
+ g = _mm_add_ps(_mm256_extractf128_ps(c1, 1),
+ _mm256_castps256_ps128(c1));
+ g = _mm_add_ps(g, _mm_movehl_ps(g, g));
+ g = _mm_add_ss(g, _mm_movehdup_ps(g));
+ sumf += (ggml_float)_mm_cvtss_f32(g);
+
+#undef LOAD
+#endif
+
+ for (; i < n; ++i) {
+ sumf += (ggml_float)(GGML_BF16_TO_FP32(x[i]) *
+ GGML_BF16_TO_FP32(y[i]));
+ }
+ *s = sumf;
+}
+
static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc) {
assert(nrc == 1);
UNUSED(nrc);
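For n below the SIMD block sizes, ggml_vec_dot_bf16 falls through to the scalar tail and computes exactly, since small integers and halves are representable in bf16. A sketch of a unit check that could sit inside ggml.c next to the function (the test name is hypothetical):

static void test_vec_dot_bf16(void) {
    ggml_bf16_t x[4], y[4];
    for (int i = 0; i < 4; ++i) {
        x[i] = GGML_FP32_TO_BF16((float)(i + 1));  // 1, 2, 3, 4: exact in bf16
        y[i] = GGML_FP32_TO_BF16(0.5f);
    }
    float s = 0.0f;
    ggml_vec_dot_bf16(4, &s, 0, x, 0, y, 0, 1);
    assert(s == 5.0f);  // 0.5*(1+2+3+4)
}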
@@ -1967,6 +2099,14 @@ inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_
*s = sum;
}
+inline static void ggml_vec_sum_bf16_ggf(const int n, float * s, const ggml_bf16_t * x) {
+ float sum = 0.0f;
+ for (int i = 0; i < n; ++i) {
+ sum += GGML_BF16_TO_FP32(x[i]);
+ }
+ *s = sum;
+}
+
inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
float max = -INFINITY;
@@ -2377,7 +2517,7 @@ void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
// figure out which node we're on
uint current_cpu;
int getcpu_ret = 0;
-#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 28)
+#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 28) || defined(__COSMOPOLITAN__)
getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
#else
// old glibc doesn't have a wrapper for this call. Fall back on direct syscall
@@ -2588,6 +2728,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
switch (ftype) {
case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break;
case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
+ case GGML_FTYPE_MOSTLY_BF16: wtype = GGML_TYPE_BF16; break;
case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
@@ -2729,15 +2870,16 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
{
const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
- ggml_fp16_t ii;
for (int i = 0; i < (1 << 16); ++i) {
- uint16_t ui = i;
- memcpy(&ii, &ui, sizeof(ii));
- const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
+ union {
+ uint16_t u16;
+ ggml_fp16_t fp16;
+ } u = {i};
+ float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16);
ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
- ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
+ ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
}
const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
@@ -3201,6 +3343,13 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
}
} break;
+ case GGML_TYPE_BF16:
+ {
+ assert(tensor->nb[0] == sizeof(ggml_bf16_t));
+ for (int i = 0; i < n; i++) {
+ ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
+ }
+ } break;
case GGML_TYPE_F32:
{
assert(tensor->nb[0] == sizeof(float));
@@ -3253,6 +3402,13 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
}
} break;
+ case GGML_TYPE_BF16:
+ {
+ assert(tensor->nb[0] == sizeof(ggml_bf16_t));
+ for (int i = 0; i < n; i++) {
+ ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
+ }
+ } break;
case GGML_TYPE_F32:
{
assert(tensor->nb[0] == sizeof(float));
@@ -3320,6 +3476,11 @@ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
}
+ case GGML_TYPE_BF16:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
+ }
case GGML_TYPE_F32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(float));
@@ -3362,6 +3523,11 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
} break;
+ case GGML_TYPE_BF16:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
+ ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
+ } break;
case GGML_TYPE_F32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(float));
@@ -3385,6 +3551,8 @@ int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i
return ((int32_t *) data)[0];
case GGML_TYPE_F16:
return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
+ case GGML_TYPE_BF16:
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
case GGML_TYPE_F32:
return ((float *) data)[0];
default:
@@ -3413,6 +3581,10 @@ void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2,
{
((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
} break;
+ case GGML_TYPE_BF16:
+ {
+ ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
+ } break;
case GGML_TYPE_F32:
{
((float *)(data))[0] = value;
@@ -3451,6 +3623,11 @@ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
}
+ case GGML_TYPE_BF16:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
+ }
case GGML_TYPE_F32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(float));
@@ -3493,6 +3670,11 @@ void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
} break;
+ case GGML_TYPE_BF16:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
+ ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
+ } break;
case GGML_TYPE_F32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(float));
@@ -3516,6 +3698,8 @@ float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2,
return ((int32_t *) data)[0];
case GGML_TYPE_F16:
return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
+ case GGML_TYPE_BF16:
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
case GGML_TYPE_F32:
return ((float *) data)[0];
default:
@@ -3544,6 +3728,10 @@ void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2,
{
((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
} break;
+ case GGML_TYPE_BF16:
+ {
+ ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
+ } break;
case GGML_TYPE_F32:
{
((float *)(data))[0] = value;
@@ -3738,7 +3926,11 @@ static struct ggml_tensor * ggml_add_cast_impl(
// TODO: support less-strict constraint
// GGML_ASSERT(ggml_can_repeat(b, a));
GGML_ASSERT(ggml_can_repeat_rows(b, a));
- GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16
+
+ // currently only supported for quantized input and f16
+ GGML_ASSERT(ggml_is_quantized(a->type) ||
+ a->type == GGML_TYPE_F16 ||
+ a->type == GGML_TYPE_BF16);
bool is_node = false;
@@ -7215,8 +7407,8 @@ static void ggml_compute_forward_dup_same_cont(
((char *) src0->data + ie0*nb00),
(ie1 - ie0) * ggml_type_size(src0->type));
}
-
}
+
static void ggml_compute_forward_dup_f16(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
@@ -7490,6 +7682,366 @@ static void ggml_compute_forward_dup_f16(
}
}
+static void ggml_compute_forward_dup_bf16(
+ const struct ggml_compute_params * params,
+ struct ggml_tensor * dst) {
+
+ const struct ggml_tensor * src0 = dst->src[0];
+
+ GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
+
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+ return;
+ }
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+
+ const int ith = params->ith; // thread index
+ const int nth = params->nth; // number of threads
+
+ if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
+ ggml_compute_forward_dup_same_cont(params, dst);
+ return;
+ }
+
+ // parallelize by rows
+ const int nr = ne01;
+ // number of rows per thread
+ const int dr = (nr + nth - 1) / nth;
+ // row range for this thread
+ const int ir0 = dr * ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ if (src0->type == dst->type &&
+ ne00 == ne0 &&
+ nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
+ // copy by rows
+ const size_t rs = ne00*nb00;
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ for (int64_t i01 = ir0; i01 < ir1; i01++) {
+ memcpy(
+ ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
+ ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
+ rs);
+ }
+ }
+ }
+ return;
+ }
+
+ // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy
+
+ if (ggml_is_contiguous(dst)) {
+ if (nb00 == sizeof(ggml_bf16_t)) {
+ if (dst->type == GGML_TYPE_BF16) {
+ size_t id = 0;
+ const size_t rs = ne00 * nb00;
+ char * dst_ptr = (char *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += rs * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
+ memcpy(dst_ptr + id, src0_ptr, rs);
+ id += rs;
+ }
+ id += rs * (ne01 - ir1);
+ }
+ }
+ } else if (dst->type == GGML_TYPE_F16) {
+ size_t id = 0;
+ ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += ne00 * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
+ for (int i00 = 0; i00 < ne00; i00++) {
+ dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00]));
+ id++;
+ }
+ }
+ id += ne00 * (ne01 - ir1);
+ }
+ }
+ } else if (dst->type == GGML_TYPE_F32) {
+ size_t id = 0;
+ float * dst_ptr = (float *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += ne00 * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
+ for (int i00 = 0; i00 < ne00; i00++) {
+ dst_ptr[id] = GGML_BF16_TO_FP32(src0_ptr[i00]);
+ id++;
+ }
+ }
+ id += ne00 * (ne01 - ir1);
+ }
+ }
+ } else if (type_traits[dst->type].from_float) {
+ ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
+ float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
+
+ size_t id = 0;
+ size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
+ char * dst_ptr = (char *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += rs * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
+
+ for (int i00 = 0; i00 < ne00; i00++) {
+ src0_f32[i00] = GGML_BF16_TO_FP32(src0_ptr[i00]);
+ }
+
+ quantize_row_q(src0_f32, dst_ptr + id, ne00);
+ id += rs;
+ }
+ id += rs * (ne01 - ir1);
+ }
+ }
+ } else {
+ GGML_ASSERT(false); // TODO: implement
+ }
+ } else {
+ //printf("%s: this is not optimal - fix me\n", __func__);
+
+ if (dst->type == GGML_TYPE_F32) {
+ size_t id = 0;
+ float * dst_ptr = (float *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += ne00 * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ for (int i00 = 0; i00 < ne00; i00++) {
+ const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+
+ dst_ptr[id] = GGML_BF16_TO_FP32(*src0_ptr);
+ id++;
+ }
+ }
+ id += ne00 * (ne01 - ir1);
+ }
+ }
+ } else if (dst->type == GGML_TYPE_BF16) {
+ size_t id = 0;
+ ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += ne00 * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ for (int i00 = 0; i00 < ne00; i00++) {
+ const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+
+ dst_ptr[id] = *src0_ptr;
+ id++;
+ }
+ }
+ id += ne00 * (ne01 - ir1);
+ }
+ }
+ } else if (dst->type == GGML_TYPE_F16) {
+ size_t id = 0;
+ ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += ne00 * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ for (int i00 = 0; i00 < ne00; i00++) {
+ const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+
+ dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr));
+ id++;
+ }
+ }
+ id += ne00 * (ne01 - ir1);
+ }
+ }
+ } else {
+ GGML_ASSERT(false); // TODO: implement
+ }
+ }
+ return;
+ }
+
+ // dst counters
+ int64_t i10 = 0;
+ int64_t i11 = 0;
+ int64_t i12 = 0;
+ int64_t i13 = 0;
+
+ if (dst->type == GGML_TYPE_BF16) {
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ i10 += ne00 * ir0;
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ for (int64_t i01 = ir0; i01 < ir1; i01++) {
+ for (int64_t i00 = 0; i00 < ne00; i00++) {
+ const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+ char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
+
+ memcpy(dst_ptr, src0_ptr, sizeof(ggml_bf16_t));
+
+ if (++i10 == ne00) {
+ i10 = 0;
+ if (++i11 == ne01) {
+ i11 = 0;
+ if (++i12 == ne02) {
+ i12 = 0;
+ if (++i13 == ne03) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ i10 += ne00 * (ne01 - ir1);
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ } else if (dst->type == GGML_TYPE_F16) {
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ i10 += ne00 * ir0;
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ for (int64_t i01 = ir0; i01 < ir1; i01++) {
+ for (int64_t i00 = 0; i00 < ne00; i00++) {
+ const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+ char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
+
+ *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr));
+
+ if (++i10 == ne0) {
+ i10 = 0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ i10 += ne00 * (ne01 - ir1);
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ } else if (dst->type == GGML_TYPE_F32) {
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ i10 += ne00 * ir0;
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ for (int64_t i01 = ir0; i01 < ir1; i01++) {
+ for (int64_t i00 = 0; i00 < ne00; i00++) {
+ const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+ char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
+
+ *(float *) dst_ptr = GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr);
+
+ if (++i10 == ne0) {
+ i10 = 0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ i10 += ne00 * (ne01 - ir1);
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ GGML_ASSERT(false); // TODO: implement
+ }
+}
+
static void ggml_compute_forward_dup_f32(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
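Each of these forward ops splits rows across threads with the same ceil-divide seen in the function above: dr = (nr + nth - 1)/nth rows per thread, with the last range clamped. A tiny standalone illustration of the partition:

#include <stdio.h>
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
    const int nr = 10, nth = 4;              // 10 rows over 4 threads
    const int dr = (nr + nth - 1) / nth;     // 3 rows per thread, rounded up
    for (int ith = 0; ith < nth; ++ith) {
        const int ir0 = dr * ith;
        const int ir1 = MIN(ir0 + dr, nr);   // last thread gets the remainder
        printf("thread %d: rows [%d, %d)\n", ith, ir0, ir1);
    }
    return 0;
}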
@@ -7617,6 +8169,24 @@ static void ggml_compute_forward_dup_f32(
id += ne00 * (ne01 - ir1);
}
}
+ } else if (dst->type == GGML_TYPE_BF16) {
+ size_t id = 0;
+ ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ id += ne00 * ir0;
+ for (int i01 = ir0; i01 < ir1; i01++) {
+ for (int i00 = 0; i00 < ne00; i00++) {
+ const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+
+ dst_ptr[id] = GGML_FP32_TO_BF16(*src0_ptr);
+ id++;
+ }
+ }
+ id += ne00 * (ne01 - ir1);
+ }
+ }
} else {
GGML_ASSERT(false); // TODO: implement
}
@@ -7736,6 +8306,58 @@ static void ggml_compute_forward_dup_f32(
}
}
}
+ } else if (dst->type == GGML_TYPE_BF16) {
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ i10 += ne00 * ir0;
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ for (int64_t i01 = ir0; i01 < ir1; i01++) {
+ for (int64_t i00 = 0; i00 < ne00; i00++) {
+ const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+ char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
+
+ *(ggml_bf16_t *) dst_ptr = GGML_FP32_TO_BF16(*(const float *) src0_ptr);
+
+ if (++i10 == ne0) {
+ i10 = 0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ i10 += ne00 * (ne01 - ir1);
+ while (i10 >= ne0) {
+ i10 -= ne0;
+ if (++i11 == ne1) {
+ i11 = 0;
+ if (++i12 == ne2) {
+ i12 = 0;
+ if (++i13 == ne3) {
+ i13 = 0;
+ }
+ }
+ }
+ }
+ }
+ }
} else {
GGML_ASSERT(false); // TODO: implement
}
@@ -7909,6 +8531,10 @@ static void ggml_compute_forward_dup(
{
ggml_compute_forward_dup_f16(params, dst);
} break;
+ case GGML_TYPE_BF16:
+ {
+ ggml_compute_forward_dup_bf16(params, dst);
+ } break;
case GGML_TYPE_F32:
{
ggml_compute_forward_dup_f32(params, dst);
@@ -8091,6 +8717,85 @@ static void ggml_compute_forward_add_f16_f32(
}
}
+static void ggml_compute_forward_add_bf16_f32(
+ const struct ggml_compute_params * params,
+ struct ggml_tensor * dst) {
+
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
+
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ GGML_ASSERT(src0->type == GGML_TYPE_BF16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+ if (dst->type == GGML_TYPE_F32) {
+ GGML_ASSERT( nb0 == sizeof(float));
+ }
+ else {
+ GGML_ASSERT(dst->type == GGML_TYPE_BF16);
+ GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
+ }
+
+ GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ if (nb10 == sizeof(float)) {
+ if (dst->type == GGML_TYPE_BF16) {
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
+
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
+ }
+ }
+ } else {
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
+
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
+ }
+ }
+ }
+ }
+ else {
+ // src1 is not contiguous
+ GGML_ASSERT(false);
+ }
+}
+
static void ggml_compute_forward_add_f16_f16(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
@@ -8147,6 +8852,62 @@ static void ggml_compute_forward_add_f16_f16(
}
}
+static void ggml_compute_forward_add_bf16_bf16(
+ const struct ggml_compute_params * params,
+ struct ggml_tensor * dst) {
+
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
+
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ GGML_ASSERT(src0->type == GGML_TYPE_BF16);
+ GGML_ASSERT(src1->type == GGML_TYPE_BF16);
+ GGML_ASSERT(dst->type == GGML_TYPE_BF16);
+
+ GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
+ GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ if (nb10 == sizeof(ggml_bf16_t)) {
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ ggml_bf16_t * src1_ptr = (ggml_bf16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
+
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + GGML_BF16_TO_FP32(src1_ptr[i]));
+ }
+ }
+ }
+ else {
+ // src1 is not contiguous
+ GGML_ASSERT(false);
+ }
+}
+
static void ggml_compute_forward_add_q_f32(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
@@ -8256,6 +9017,18 @@ static void ggml_compute_forward_add(
GGML_ASSERT(false);
}
} break;
+ case GGML_TYPE_BF16:
+ {
+ if (src1->type == GGML_TYPE_BF16) {
+ ggml_compute_forward_add_bf16_bf16(params, dst);
+ }
+ else if (src1->type == GGML_TYPE_F32) {
+ ggml_compute_forward_add_bf16_f32(params, dst);
+ }
+ else {
+ GGML_ASSERT(false);
+ }
+ } break;
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q5_0:
@@ -8514,6 +9287,110 @@ static void ggml_compute_forward_add1_q_f32(
}
}
+static void ggml_compute_forward_add1_bf16_f32(
+ const struct ggml_compute_params * params,
+ struct ggml_tensor * dst) {
+
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_scalar(src1));
+
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+ return;
+ }
+
+ // scalar to add
+ const float v = *(float *) src1->data;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+
+ GGML_ASSERT(src0->type == GGML_TYPE_BF16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_BF16);
+
+ GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
+ GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v);
+ }
+ }
+}
+
+static void ggml_compute_forward_add1_bf16_bf16(
+ const struct ggml_compute_params * params,
+ struct ggml_tensor * dst) {
+
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_scalar(src1));
+
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+ return;
+ }
+
+ // scalar to add
+ const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data);
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+
+ GGML_ASSERT(src0->type == GGML_TYPE_BF16);
+ GGML_ASSERT(src1->type == GGML_TYPE_BF16);
+ GGML_ASSERT(dst->type == GGML_TYPE_BF16);
+
+ GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
+ GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v);
+ }
+ }
+}
+
static void ggml_compute_forward_add1(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
@@ -8538,6 +9415,18 @@ static void ggml_compute_forward_add1(
GGML_ASSERT(false);
}
} break;
+ case GGML_TYPE_BF16:
+ {
+ if (src1->type == GGML_TYPE_BF16) {
+ ggml_compute_forward_add1_bf16_bf16(params, dst);
+ }
+ else if (src1->type == GGML_TYPE_F32) {
+ ggml_compute_forward_add1_bf16_f32(params, dst);
+ }
+ else {
+ GGML_ASSERT(false);
+ }
+ } break;
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q5_0:
@@ -8666,6 +9555,7 @@ static void ggml_compute_forward_acc(
ggml_compute_forward_acc_f32(params, dst);
} break;
case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q5_0:
@@ -9187,6 +10077,40 @@ static void ggml_compute_forward_sum_f16(
((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
}
+static void ggml_compute_forward_sum_bf16(
+ const struct ggml_compute_params * params,
+ struct ggml_tensor * dst) {
+
+ const struct ggml_tensor * src0 = dst->src[0];
+
+ assert(params->ith == 0);
+ assert(ggml_is_scalar(dst));
+
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+ return;
+ }
+
+ assert(src0->nb[0] == sizeof(ggml_bf16_t));
+
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
+ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
+
+ float sum = 0;
+ float row_sum = 0;
+
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ for (int64_t i01 = 0; i01 < ne01; i01++) {
+ ggml_vec_sum_bf16_ggf(ne00,
+ &row_sum,
+ (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
+ sum += row_sum;
+ }
+ }
+ }
+ ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum);
+}
+
static void ggml_compute_forward_sum(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
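Accumulating in float and converting once at the end avoids a stall: bf16 carries only an 8-bit significand, so 257 is not representable and a running bf16 sum would stop absorbing small addends. A check of that, assuming the ggml-impl.h helpers are in scope:

#include <stdio.h>
#include "ggml-impl.h"

int main(void) {
    float acc = ggml_compute_bf16_to_fp32(ggml_compute_fp32_to_bf16(256.0f));
    ggml_bf16_t next = ggml_compute_fp32_to_bf16(acc + 1.0f);  // 257 rounds to 256
    printf("%f\n", ggml_compute_bf16_to_fp32(next));           // prints 256.000000
    return 0;
}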
@@ -9202,6 +10126,10 @@ static void ggml_compute_forward_sum(
{
ggml_compute_forward_sum_f16(params, dst);
} break;
+ case GGML_TYPE_BF16:
+ {
+ ggml_compute_forward_sum_bf16(params, dst);
+ } break;
default:
{
GGML_ASSERT(false);
@@ -9476,6 +10404,7 @@ static void ggml_compute_forward_repeat(
switch (src0->type) {
case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
case GGML_TYPE_I16:
{
ggml_compute_forward_repeat_f16(params, dst);
@@ -11793,6 +12722,7 @@ static void ggml_compute_forward_set(
ggml_compute_forward_set_f32(params, dst);
} break;
case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q5_0:
@@ -11967,6 +12897,49 @@ static void ggml_compute_forward_get_rows_f16(
}
}
+static void ggml_compute_forward_get_rows_bf16(
+ const struct ggml_compute_params * params,
+ struct ggml_tensor * dst) {
+
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+ return;
+ }
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int64_t nc = ne00;
+ const int64_t nr = ggml_nelements(src1);
+
+ assert(ne0 == nc);
+ assert(ne02 == ne11);
+ assert(nb00 == sizeof(ggml_bf16_t));
+ assert(ggml_nrows(dst) == nr);
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int64_t i = ir0; i < ir1; ++i) {
+ const int64_t i12 = i/(ne11*ne10);
+ const int64_t i11 = (i - i12*ne11*ne10)/ne10;
+ const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
+ const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
+
+ ggml_bf16_to_fp32_row(
+ (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
+ (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
+ }
+}
+
static void ggml_compute_forward_get_rows_f32(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
@@ -12044,6 +13017,10 @@ static void ggml_compute_forward_get_rows(
{
ggml_compute_forward_get_rows_f16(params, dst);
} break;
+ case GGML_TYPE_BF16:
+ {
+ ggml_compute_forward_get_rows_bf16(params, dst);
+ } break;
case GGML_TYPE_F32:
case GGML_TYPE_I32:
{
@@ -12739,6 +13716,7 @@ static void ggml_compute_forward_alibi(
{
ggml_compute_forward_alibi_f32(params, dst);
} break;
+ case GGML_TYPE_BF16:
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q5_0:
@@ -12828,6 +13806,7 @@ static void ggml_compute_forward_clamp(
ggml_compute_forward_clamp_f32(params, dst);
} break;
case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q5_0:
@@ -15921,6 +16900,7 @@ static void ggml_compute_forward_get_rel_pos(
switch (src0->type) {
case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
{
ggml_compute_forward_get_rel_pos_f16(params, dst);
} break;
@@ -18785,7 +19765,10 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
case GGML_OP_CPY:
case GGML_OP_DUP:
{
- if (ggml_is_quantized(node->type)) {
+ if (ggml_is_quantized(node->type) ||
+ // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32
+ (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) ||
+ (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16)) {
cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
}
} break;
@@ -18864,7 +19847,8 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
const int64_t ne10 = node->src[1]->ne[0]; // L
const int64_t ne11 = node->src[1]->ne[1]; // Cin
- if (node->src[0]->type == GGML_TYPE_F16 &&
+ if ((node->src[0]->type == GGML_TYPE_F16 ||
+ node->src[0]->type == GGML_TYPE_BF16) &&
node->src[1]->type == GGML_TYPE_F32) {
cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
cur += sizeof(ggml_fp16_t)*ne10*ne11;
@@ -18900,6 +19884,9 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
} else if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
+ } else if (node->src[1]->type == GGML_TYPE_BF16) {
+ cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
+ cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
}
} break;
case GGML_OP_FLASH_ATTN_EXT:
@@ -18916,6 +19903,9 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
} else if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
+ } else if (node->src[1]->type == GGML_TYPE_BF16) {
+ cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
+ cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
}
} break;
case GGML_OP_FLASH_ATTN_BACK:
@@ -18929,6 +19919,9 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
} else if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
+ } else if (node->src[1]->type == GGML_TYPE_BF16) {
+ cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
+ cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
}
} break;
@@ -19705,7 +20698,9 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph
if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
fprintf(fp, "%d", ggml_get_i32_1d(node, j));
}
- else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
+ else if (node->type == GGML_TYPE_F32 ||
+ node->type == GGML_TYPE_F16 ||
+ node->type == GGML_TYPE_BF16) {
fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
}
else {
@@ -20763,6 +21758,12 @@ size_t ggml_quantize_chunk(
ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
result = n * elemsize;
} break;
+ case GGML_TYPE_BF16:
+ {
+ size_t elemsize = sizeof(ggml_bf16_t);
+ ggml_fp32_to_bf16_row(src + start, (ggml_bf16_t *)dst + start, n);
+ result = n * elemsize;
+ } break;
case GGML_TYPE_F32:
{
size_t elemsize = sizeof(float);
@@ -21139,7 +22140,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
}
// read the tensor infos
- {
+ if (ctx->header.n_tensors > 0) {
ctx->infos = GGML_CALLOC(ctx->header.n_tensors, sizeof(struct gguf_tensor_info));
for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
diff --git a/ggml.h b/ggml.h
index a11795973..fe6053822 100644
--- a/ggml.h
+++ b/ggml.h
@@ -326,14 +326,20 @@ extern "C" {
// get ggml_status name string
GGML_API GGML_CALL const char * ggml_status_to_string(enum ggml_status status);
+ // ieee 754-2008 half-precision float16
+ // todo: make this not an integral type
typedef uint16_t ggml_fp16_t;
+ GGML_API float ggml_fp16_to_fp32(ggml_fp16_t);
+ GGML_API ggml_fp16_t ggml_fp32_to_fp16(float);
+ GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t *, float *, int64_t);
+ GGML_API void ggml_fp32_to_fp16_row(const float *, ggml_fp16_t *, int64_t);
- // convert FP16 <-> FP32
- GGML_API float ggml_fp16_to_fp32(ggml_fp16_t x);
- GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);
-
- GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int64_t n);
- GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int64_t n);
+ // google brain half-precision bfloat16
+ typedef struct { uint16_t bits; } ggml_bf16_t;
+ GGML_API ggml_bf16_t ggml_fp32_to_bf16(float);
+ GGML_API float ggml_bf16_to_fp32(ggml_bf16_t); // consider just doing << 16
+ GGML_API void ggml_bf16_to_fp32_row(const ggml_bf16_t *, float *, int64_t);
+ GGML_API void ggml_fp32_to_bf16_row(const float *, ggml_bf16_t *, int64_t);
struct ggml_object;
struct ggml_context;
@@ -370,6 +376,7 @@ extern "C" {
GGML_TYPE_I64 = 27,
GGML_TYPE_F64 = 28,
GGML_TYPE_IQ1_M = 29,
+ GGML_TYPE_BF16 = 30,
GGML_TYPE_COUNT,
};
@@ -410,6 +417,7 @@ extern "C" {
GGML_FTYPE_MOSTLY_IQ2_S = 21, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_XS = 22, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors
+ GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors
};
// available tensor operations:
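A usage sketch for the new public row converters declared above, linking against ggml; values needing more than 8 significant bits round on encode (3.14159f comes back as 3.140625f):

#include <stdio.h>
#include "ggml.h"

int main(void) {
    float in[3] = {1.0f, 3.14159f, -0.25f};
    ggml_bf16_t packed[3];
    float out[3];
    ggml_fp32_to_bf16_row(in, packed, 3);   // encode: round fp32 to top 16 bits
    ggml_bf16_to_fp32_row(packed, out, 3);  // decode: widen and left-shift by 16
    for (int i = 0; i < 3; ++i) {
        printf("%f -> %f\n", in[i], out[i]);
    }
    return 0;
}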
diff --git a/ggml_vk_generate_shaders.py b/ggml_vk_generate_shaders.py
index 5dd700963..1d9a0cc82 100644
--- a/ggml_vk_generate_shaders.py
+++ b/ggml_vk_generate_shaders.py
@@ -1,11 +1,14 @@
#!/usr/bin/env python
+import logging
import argparse
import asyncio
import os
import sys
from tempfile import gettempdir, NamedTemporaryFile
+logger = logging.getLogger("ggml-vk-generate-shaders")
+
shader_f32 = """
#define FLOAT_TYPE float
"""
@@ -2498,7 +2501,7 @@ async def string_to_spv(name, code, defines, fp16=True):
stdout, stderr = await proc.communicate()
- print(" ".join(cmd))
+ logger.info(" ".join(cmd))
if proc.returncode:
raise RuntimeError(f"{name=} {f.name=} {stdout=} {stderr=}")
@@ -2507,7 +2510,7 @@ async def string_to_spv(name, code, defines, fp16=True):
cmd.extend([f"-D{key}={value}" for key, value in defines.items()])
code_with_lines = "\n".join([f"{i + 1}: {line}" for i, line in enumerate(preprocessed_code.splitlines())])
- print(f"ERROR compiling {name}\n\n{code_with_lines}\n\n{error}")
+ logger.error(f"cannot compile {name}\n\n{code_with_lines}\n\n{error}")
f.close()
os.remove(f.name)
sys.exit(proc.returncode)
@@ -2520,7 +2523,7 @@ async def string_to_spv(name, code, defines, fp16=True):
async def main():
- print("ggml_vulkan: Generating and compiling shaders to SPIR-V")
+ logger.info("ggml_vulkan: Generating and compiling shaders to SPIR-V")
tasks = []
@@ -2768,9 +2771,12 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description="GGML Vulkan Shader Generator")
parser.add_argument("--glslc", help="Path to glslc")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
args = parser.parse_args()
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
if args.glslc:
GLSLC = args.glslc
diff --git a/gguf-py/examples/reader.py b/gguf-py/examples/reader.py
index 62e0769da..d841048c6 100644
--- a/gguf-py/examples/reader.py
+++ b/gguf-py/examples/reader.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python3
+import logging
import sys
from pathlib import Path
from gguf.gguf_reader import GGUFReader
+logger = logging.getLogger("reader")
sys.path.insert(0, str(Path(__file__).parent.parent))
@@ -18,28 +20,28 @@ def read_gguf_file(gguf_file_path):
reader = GGUFReader(gguf_file_path)
# List all key-value pairs in a columnized format
- print("Key-Value Pairs:")
+ print("Key-Value Pairs:") # noqa: NP100
max_key_length = max(len(key) for key in reader.fields.keys())
for key, field in reader.fields.items():
value = field.parts[field.data[0]]
- print(f"{key:{max_key_length}} : {value}")
- print("----")
+ print(f"{key:{max_key_length}} : {value}") # noqa: NP100
+ print("----") # noqa: NP100
# List all tensors
- print("Tensors:")
+ print("Tensors:") # noqa: NP100
tensor_info_format = "{:<30} | Shape: {:<15} | Size: {:<12} | Quantization: {}"
- print(tensor_info_format.format("Tensor Name", "Shape", "Size", "Quantization"))
- print("-" * 80)
+ print(tensor_info_format.format("Tensor Name", "Shape", "Size", "Quantization")) # noqa: NP100
+ print("-" * 80) # noqa: NP100
for tensor in reader.tensors:
shape_str = "x".join(map(str, tensor.shape))
size_str = str(tensor.n_elements)
quantization_str = tensor.tensor_type.name
- print(tensor_info_format.format(tensor.name, shape_str, size_str, quantization_str))
+ print(tensor_info_format.format(tensor.name, shape_str, size_str, quantization_str)) # noqa: NP100
if __name__ == '__main__':
if len(sys.argv) < 2:
- print("Usage: reader.py <path_to_gguf_file>")
+ logger.info("Usage: reader.py <path_to_gguf_file>")
sys.exit(1)
gguf_file_path = sys.argv[1]
read_gguf_file(gguf_file_path)
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 047882818..4fc103772 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -1,6 +1,5 @@
from __future__ import annotations
-import sys
from enum import Enum, IntEnum, auto
from typing import Any
@@ -843,6 +842,7 @@ class GGMLQuantizationType(IntEnum):
I64 = 27
F64 = 28
IQ1_M = 29
+ BF16 = 30
class GGUFEndian(IntEnum):
@@ -879,14 +879,13 @@ class GGUFValueType(IntEnum):
return GGUFValueType.INT32
# TODO: need help with 64-bit types in Python
else:
- print("Unknown type:", type(val))
- sys.exit()
+ raise ValueError(f"Unknown type: {type(val)}")
# Note: Does not support GGML_QKK_64
QK_K = 256
# Items here are (block size, type size)
-GGML_QUANT_SIZES = {
+GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
GGMLQuantizationType.F32: (1, 4),
GGMLQuantizationType.F16: (1, 2),
GGMLQuantizationType.Q4_0: (32, 2 + 16),
@@ -915,6 +914,7 @@ GGML_QUANT_SIZES = {
GGMLQuantizationType.I64: (1, 8),
GGMLQuantizationType.F64: (1, 8),
GGMLQuantizationType.IQ1_M: (256, QK_K // 8 + QK_K // 16 + QK_K // 32),
+ GGMLQuantizationType.BF16: (1, 2),
}
diff --git a/gguf-py/gguf/gguf_reader.py b/gguf-py/gguf/gguf_reader.py
index 2bdb15525..21b089f8a 100644
--- a/gguf-py/gguf/gguf_reader.py
+++ b/gguf-py/gguf/gguf_reader.py
@@ -4,6 +4,7 @@
#
from __future__ import annotations
+import logging
import os
from collections import OrderedDict
from typing import Any, Literal, NamedTuple, TypeVar, Union
@@ -27,6 +28,7 @@ from gguf.constants import (
GGUFValueType,
)
+logger = logging.getLogger(__name__)
READER_SUPPORTED_VERSIONS = [2, GGUF_VERSION]
@@ -63,7 +65,7 @@ class ReaderTensor(NamedTuple):
class GGUFReader:
# I - same as host, S - swapped
- byte_order: Literal['I' | 'S'] = 'I'
+ byte_order: Literal['I'] | Literal['S'] = 'I'
alignment: int = GGUF_DEFAULT_ALIGNMENT
# Note: Internal helper, API may change.
@@ -81,7 +83,7 @@ class GGUFReader:
GGUFValueType.BOOL: np.bool_,
}
- def __init__(self, path: os.PathLike[str] | str, mode: Literal['r' | 'r+' | 'c'] = 'r'):
+ def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'):
self.data = np.memmap(path, mode = mode)
offs = 0
if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC:
@@ -126,7 +128,7 @@ class GGUFReader:
return self.tensors[idx]
def _get(
- self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I' | 'S' | '<'] = None,
+ self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I'] | Literal['S'] | Literal['<'] = None,
) -> npt.NDArray[Any]:
count = int(count)
itemsize = int(np.empty([], dtype = dtype).itemsize)
@@ -142,7 +144,7 @@ class GGUFReader:
# TODO: add option to generate error on duplicate keys
# raise KeyError(f'Duplicate {field.name} already in list at offset {field.offset}')
- print(f'Warning: Duplicate key {field.name} at offset {field.offset}')
+ logger.warning(f'Duplicate key {field.name} at offset {field.offset}')
self.fields[field.name + '_{}'.format(field.offset)] = field
else:
self.fields[field.name] = field
@@ -248,7 +250,7 @@ class GGUFReader:
raise ValueError(f'Found duplicated tensor with name {tensor_name}')
tensor_names.add(tensor_name)
ggml_type = GGMLQuantizationType(raw_dtype[0])
- n_elems = np.prod(dims)
+ n_elems = int(np.prod(dims))
block_size, type_size = GGML_QUANT_SIZES[ggml_type]
n_bytes = n_elems * type_size // block_size
data_offs = int(start_offs + offset_tensor[0])
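The `Literal` changes in this file fix annotations spelled `Literal['I' | 'S']`: that form applies `|` to two `str` instances, which type checkers reject (and which raises `TypeError` if the annotation is ever evaluated). The union has to be built from `Literal` types. A minimal illustration:

```python
from __future__ import annotations

from typing import Literal

# Rejected: '|' between str values is not a type expression
# byte_order: Literal['I' | 'S']

# Accepted spellings, equivalent to each other:
byte_order: Literal['I'] | Literal['S'] = 'I'
mode: Literal['r', 'r+', 'c'] = 'r'  # the multi-argument form is the usual shorthand
```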
diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py
index 089aece87..8dcf9330b 100644
--- a/gguf-py/gguf/gguf_writer.py
+++ b/gguf-py/gguf/gguf_writer.py
@@ -1,12 +1,13 @@
from __future__ import annotations
+import logging
import os
import shutil
import struct
import tempfile
from enum import Enum, auto
from io import BufferedWriter
-from typing import IO, Any, Sequence, Mapping
+from typing import IO, Any, Callable, Sequence, Mapping
from string import ascii_letters, digits
import numpy as np
@@ -24,6 +25,49 @@ from .constants import (
TokenType,
)
+logger = logging.getLogger(__name__)
+
+
+class LazyTensor:
+ data: Callable[[], np.ndarray[Any, Any]]
+ # queued transforms, applied iteratively in tofile() to avoid too-deep recursion
+ functions: list[Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]]]
+ dtype: np.dtype[Any]
+ shape: tuple[int, ...]
+
+ def __init__(self, data: Callable[[], np.ndarray[Any, Any]], *, dtype: type, shape: tuple[int, ...]):
+ self.data = data
+ self.functions = []
+ self.dtype = np.dtype(dtype)
+ self.shape = shape
+
+ def astype(self, dtype: type, **kwargs) -> LazyTensor:
+ self.functions.append(lambda n: n.astype(dtype, **kwargs))
+ self.dtype = np.dtype(dtype)
+ return self
+
+ @property
+ def nbytes(self) -> int:
+ size = 1
+ for n in self.shape:
+ size *= n
+ return size * self.dtype.itemsize
+
+ def tofile(self, *args, **kwargs) -> None:
+ data = self.data()
+ for f in self.functions:
+ data = f(data)
+ assert data.shape == self.shape
+ assert data.dtype == self.dtype
+ assert data.nbytes == self.nbytes
+ self.functions = []
+ self.data = lambda: data
+ data.tofile(*args, **kwargs)
+
+ def byteswap(self, *args, **kwargs) -> LazyTensor:
+ self.functions.append(lambda n: n.byteswap(*args, **kwargs))
+ return self
+
class WriterState(Enum):
EMPTY = auto()
@@ -35,7 +79,7 @@ class WriterState(Enum):
class GGUFWriter:
fout: BufferedWriter
temp_file: tempfile.SpooledTemporaryFile[bytes] | None
- tensors: list[np.ndarray[Any, Any]]
+ tensors: list[np.ndarray[Any, Any] | LazyTensor]
_simple_value_packing = {
GGUFValueType.UINT8: "B",
GGUFValueType.INT8: "b",
@@ -67,7 +111,7 @@ class GGUFWriter:
self.use_temp_file = use_temp_file
self.temp_file = None
self.tensors = []
- print("gguf: This GGUF file is for {0} Endian only".format(
+ logger.info("gguf: This GGUF file is for {0} Endian only".format(
"Big" if self.endianess == GGUFEndian.BIG else "Little",
))
self.state = WriterState.EMPTY
@@ -173,7 +217,7 @@ class GGUFWriter:
if pack_fmt is not None:
self.kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL)
elif vtype == GGUFValueType.STRING:
- encoded_val = val.encode("utf8") if isinstance(val, str) else val
+ encoded_val = val.encode("utf-8") if isinstance(val, str) else val
self.kv_data += self._pack("Q", len(encoded_val))
self.kv_data += encoded_val
elif vtype == GGUFValueType.ARRAY and isinstance(val, Sequence) and val:
@@ -202,7 +246,7 @@ class GGUFWriter:
raise ValueError(f'Duplicated tensor name {name}')
self.ti_names.add(name)
- encoded_name = name.encode("utf8")
+ encoded_name = name.encode("utf-8")
self.ti_data += self._pack("Q", len(encoded_name))
self.ti_data += encoded_name
n_dims = len(tensor_shape)
@@ -234,7 +278,7 @@ class GGUFWriter:
self.ti_data_count += 1
def add_tensor(
- self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None,
+ self, name: str, tensor: np.ndarray[Any, Any] | LazyTensor, raw_shape: Sequence[int] | None = None,
raw_dtype: GGMLQuantizationType | None = None,
) -> None:
if self.endianess == GGUFEndian.BIG:
@@ -259,7 +303,7 @@ class GGUFWriter:
if pad != 0:
fp.write(bytes([0] * pad))
- def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
+ def write_tensor_data(self, tensor: np.ndarray[Any, Any] | LazyTensor) -> None:
if self.state is not WriterState.TI_DATA:
raise ValueError(f'Expected output file to contain tensor info, got {self.state}')
@@ -269,15 +313,33 @@ class GGUFWriter:
tensor.tofile(self.fout)
self.write_padding(self.fout, tensor.nbytes)
- def write_tensors_to_file(self) -> None:
+ def write_tensors_to_file(self, *, progress: bool = False) -> None:
self.write_ti_data_to_file()
self.write_padding(self.fout, self.fout.tell())
if self.temp_file is None:
+ self.tensors.reverse() # to pop from the "beginning" in constant time
+
+ if progress:
+ from tqdm import tqdm
+
+ total_bytes = sum(t.nbytes for t in self.tensors)
+
+ bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True)
+
+ while True:
+ try:
+ tensor = self.tensors.pop()
+ except IndexError:
+ break
+ tensor.tofile(self.fout)
+ bar.update(tensor.nbytes)
+ self.write_padding(self.fout, tensor.nbytes)
+ return
while True:
try:
- tensor = self.tensors.pop(0)
+ tensor = self.tensors.pop()
except IndexError:
break
tensor.tofile(self.fout)
@@ -476,7 +538,7 @@ class GGUFWriter:
self.add_bool(Keys.Tokenizer.ADD_PREFIX, value)
def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None:
- if isinstance(value, list):
+ if not isinstance(value, str):
template_default = None
template_names = set()
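The new `LazyTensor` defers both loading and dtype conversion until `tofile()` runs, which is what lets `write_tensors_to_file(progress=True)` stream tensors without holding converted copies in memory. A minimal sketch of the contract, with made-up data and a hypothetical output path:

```python
import numpy as np

from gguf.gguf_writer import LazyTensor

# The callable is only invoked inside tofile(); nothing is materialized before that
lazy = LazyTensor(lambda: np.ones((2, 3), dtype=np.float32), dtype=np.float32, shape=(2, 3))
lazy.astype(np.float16)          # queued, not applied yet
assert lazy.nbytes == 2 * 3 * 2  # computed from the shape and the *target* dtype

with open("tensor.bin", "wb") as fout:  # hypothetical output path
    lazy.tofile(fout)            # loads, applies queued conversions, then writes
```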
diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py
index 378eaecad..3ba99be4f 100644
--- a/gguf-py/gguf/vocab.py
+++ b/gguf-py/gguf/vocab.py
@@ -1,23 +1,25 @@
from __future__ import annotations
+import logging
import json
import os
-import sys
from pathlib import Path
-from typing import Any, Callable
+from typing import Any, Callable, Sequence, Mapping, Iterable
from .gguf_writer import GGUFWriter
+logger = logging.getLogger(__name__)
+
class SpecialVocab:
merges: list[str]
add_special_token: dict[str, bool]
special_token_ids: dict[str, int]
- chat_template: str | None
+ chat_template: str | Sequence[Mapping[str, str]] | None
def __init__(
self, path: str | os.PathLike[str], load_merges: bool = False,
- special_token_types: tuple[str, ...] | None = None,
+ special_token_types: Iterable[str] | None = None,
n_vocab: int | None = None,
):
self.special_token_ids = {}
@@ -40,38 +42,29 @@ class SpecialVocab:
def add_to_gguf(self, gw: GGUFWriter, quiet: bool = False) -> None:
if self.merges:
if not quiet:
- print(f'gguf: Adding {len(self.merges)} merge(s).')
+ logger.info(f'Adding {len(self.merges)} merge(s).')
gw.add_token_merges(self.merges)
elif self.load_merges:
- print(
- 'gguf: WARNING: Adding merges requested but no merges found, output may be non-functional.',
- file = sys.stderr,
- )
+ logger.warning('Adding merges requested but no merges found, output may be non-functional.')
for typ, tokid in self.special_token_ids.items():
id_handler: Callable[[int], None] | None = getattr(gw, f'add_{typ}_token_id', None)
if id_handler is None:
- print(
- f'gguf: WARNING: No handler for special token type {typ} with id {tokid} - skipping',
- file = sys.stderr,
- )
+ logger.warning(f'No handler for special token type {typ} with id {tokid} - skipping')
continue
if not quiet:
- print(f'gguf: Setting special token type {typ} to {tokid}')
+ logger.info(f'Setting special token type {typ} to {tokid}')
id_handler(tokid)
for typ, value in self.add_special_token.items():
add_handler: Callable[[bool], None] | None = getattr(gw, f'add_add_{typ}_token', None)
if add_handler is None:
- print(
- f'gguf: WARNING: No handler for add_{typ}_token with value {value} - skipping',
- file = sys.stderr,
- )
+ logger.warning(f'No handler for add_{typ}_token with value {value} - skipping')
continue
if not quiet:
- print(f'gguf: Setting add_{typ}_token to {value}')
+ logger.info(f'Setting add_{typ}_token to {value}')
add_handler(value)
if self.chat_template is not None:
if not quiet:
- print(f'gguf: Setting chat_template to {self.chat_template}')
+ logger.info(f'Setting chat_template to {self.chat_template}')
gw.add_chat_template(self.chat_template)
def _load(self, path: Path) -> None:
@@ -99,10 +92,7 @@ class SpecialVocab:
continue
parts = line.split(None, 3)
if len(parts) != 2:
- print(
- f'gguf: WARNING: {merges_file.name}: Line {line_num}: Entry malformed, ignoring',
- file = sys.stderr,
- )
+ logger.warning(f'{merges_file.name}: Line {line_num}: Entry malformed, ignoring')
continue
merges.append(f'{parts[0]} {parts[1]}')
self.merges = merges
@@ -118,10 +108,7 @@ class SpecialVocab:
return
self.special_token_ids[typ] = tid
return
- print(
- f'gguf: WARNING: Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping',
- file = sys.stderr,
- )
+ logger.warning(f'Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping')
def _try_load_from_tokenizer_json(self, path: Path) -> bool:
tokenizer_file = path / 'tokenizer.json'
@@ -144,10 +131,7 @@ class SpecialVocab:
if chat_template is None or isinstance(chat_template, (str, list)):
self.chat_template = chat_template
else:
- print(
- f'gguf: WARNING: Bad type for chat_template field in {tokenizer_config_file!r} - ignoring',
- file = sys.stderr
- )
+ logger.warning(f'Bad type for chat_template field in {tokenizer_config_file!r} - ignoring')
for typ in self.special_token_types:
add_entry = tokenizer_config.get(f'add_{typ}_token')
if isinstance(add_entry, bool):
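With the `print` calls replaced by a module-level logger, the vocab helpers are silent unless the embedding application configures logging, as the scripts below do via `--verbose`. A minimal sketch, using a hypothetical model directory:

```python
import logging

from gguf.vocab import SpecialVocab

logging.basicConfig(level=logging.INFO)  # surfaces the logger.info()/warning() output above

special_vocab = SpecialVocab("path/to/model_dir", load_merges=True)  # hypothetical path
# special_vocab.add_to_gguf(writer) would now log each merge and special token it sets
```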
diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml
index d1d876d6d..36e63ee3b 100644
--- a/gguf-py/pyproject.toml
+++ b/gguf-py/pyproject.toml
@@ -21,6 +21,7 @@ classifiers = [
[tool.poetry.dependencies]
python = ">=3.8"
numpy = ">=1.17"
+tqdm = ">=4.27"
[tool.poetry.dev-dependencies]
pytest = "^5.2"
diff --git a/gguf-py/scripts/gguf-convert-endian.py b/gguf-py/scripts/gguf-convert-endian.py
index 10a16ad06..b698af0fe 100755
--- a/gguf-py/scripts/gguf-convert-endian.py
+++ b/gguf-py/scripts/gguf-convert-endian.py
@@ -1,9 +1,11 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import argparse
import os
import sys
+from tqdm import tqdm
from pathlib import Path
import numpy as np
@@ -14,6 +16,8 @@ if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent /
import gguf
+logger = logging.getLogger("gguf-convert-endian")
+
def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None:
if np.uint32(1) == np.uint32(1).newbyteorder("<"):
@@ -29,11 +33,11 @@ def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None
else:
file_endian = host_endian
order = host_endian if args.order == "native" else args.order
- print(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian")
+ logger.info(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian")
if file_endian == order:
- print(f"* File is already {order.upper()} endian. Nothing to do.")
+ logger.info(f"* File is already {order.upper()} endian. Nothing to do.")
sys.exit(0)
- print("* Checking tensors for conversion compatibility")
+ logger.info("* Checking tensors for conversion compatibility")
for tensor in reader.tensors:
if tensor.tensor_type not in (
gguf.GGMLQuantizationType.F32,
@@ -41,51 +45,64 @@ def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None
gguf.GGMLQuantizationType.Q8_0,
):
raise ValueError(f"Cannot handle type {tensor.tensor_type.name} for tensor {repr(tensor.name)}")
- print(f"* Preparing to convert from {file_endian.upper()} to {order.upper()}")
+ logger.info(f"* Preparing to convert from {file_endian.upper()} to {order.upper()}")
if args.dry_run:
return
- print("\n*** Warning *** Warning *** Warning **")
- print("* This conversion process may damage the file. Ensure you have a backup.")
+ logger.warning("*** Warning *** Warning *** Warning **")
+ logger.warning("* This conversion process may damage the file. Ensure you have a backup.")
if order != host_endian:
- print("* Requested endian differs from host, you will not be able to load the model on this machine.")
- print("* The file will be modified immediately, so if conversion fails or is interrupted")
- print("* the file will be corrupted. Enter exactly YES if you are positive you want to proceed:")
+ logger.warning("* Requested endian differs from host, you will not be able to load the model on this machine.")
+ logger.warning("* The file will be modified immediately, so if conversion fails or is interrupted")
+ logger.warning("* the file will be corrupted. Enter exactly YES if you are positive you want to proceed:")
response = input("YES, I am sure> ")
if response != "YES":
- print("You didn't enter YES. Okay then, see ya!")
+ logger.warning("You didn't enter YES. Okay then, see ya!")
sys.exit(0)
- print(f"\n* Converting fields ({len(reader.fields)})")
+ logger.info(f"* Converting fields ({len(reader.fields)})")
for idx, field in enumerate(reader.fields.values()):
- print(f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}")
+ logger.info(f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}")
for part in field.parts:
part.byteswap(inplace=True)
- print(f"\n* Converting tensors ({len(reader.tensors)})")
- for idx, tensor in enumerate(reader.tensors):
- print(
- f" - {idx:4}: Converting tensor {repr(tensor.name)}, type={tensor.tensor_type.name}, "
- f"elements={tensor.n_elements}... ",
- end="",
+ logger.info(f"* Converting tensors ({len(reader.tensors)})")
+
+ for idx, tensor in enumerate(pbar := tqdm(reader.tensors, desc="Converting tensor")):
+ log_message = (
+ f"Converting tensor {repr(tensor.name)}, "
+ f"type={tensor.tensor_type.name}, "
+ f"elements={tensor.n_elements} "
)
- tensor_type = tensor.tensor_type
+
+ # Byte-swap each part of the tensor's field
for part in tensor.field.parts:
part.byteswap(inplace=True)
- if tensor_type != gguf.GGMLQuantizationType.Q8_0:
+
+ # Byte-swap tensor data if necessary
+ if tensor.tensor_type == gguf.GGMLQuantizationType.Q8_0:
+ # Handle Q8_0 tensor blocks (block_q8_0):
+ # each block consists of an f16 delta (scaling factor) followed by 32 int8 quantizations
+
+ block_size = 34 # 34 bytes = <f16 delta> + 32 * <int8 qs>
+
+ n_blocks = len(tensor.data) // block_size
+ for block_num in (inner_pbar := tqdm(range(n_blocks), desc="Byte-swapping Blocks", leave=False)):
+ block_offs = block_num * block_size
+
+ # Byte-Swap f16 sized delta field
+ delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16)
+ delta.byteswap(inplace=True)
+
+ # The 32 int8 quants are single bytes, so they need no byte-swapping;
+ # just refresh the inner progress description periodically
+ if block_num % 100000 == 0:
+ inner_pbar.set_description(f"Byte-swapping Blocks [{(n_blocks - block_num) // 1000}K remaining]")
+
+ else:
+ # Handle other tensor types
tensor.data.byteswap(inplace=True)
- print()
- continue
- # A Q8_0 block consists of a f16 delta followed by 32 int8 quants, so 34 bytes
- block_size = 34
- n_blocks = len(tensor.data) // block_size
- for block_num in range(n_blocks):
- block_offs = block_num * block_size
- # I know I said f16, but it doesn't matter here - any simple 16 bit type works.
- delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16)
- delta.byteswap(inplace=True)
- if block_num % 100000 == 0:
- print(f"[{(n_blocks - block_num) // 1000}K]", end="")
- sys.stdout.flush()
- print()
- print("* Completion")
+
+ pbar.set_description(log_message)
+
+ logger.info("* Completion")
def main() -> None:
@@ -102,8 +119,13 @@ def main() -> None:
"--dry-run", action="store_true",
help="Don't actually change anything",
)
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+
args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
- print(f'* Loading: {args.model}')
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+ logger.info(f'* Loading: {args.model}')
reader = gguf.GGUFReader(args.model, 'r' if args.dry_run else 'r+')
convert_byteorder(reader, args)
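The Q8_0 branch above only byte-swaps the 2-byte f16 delta of each 34-byte block; the 32 int8 quants are single bytes and therefore endian-agnostic. A toy demonstration on one fabricated block:

```python
import numpy as np

# One fabricated block_q8_0: <f16 delta> (2 bytes) + 32 * <int8 qs> = 34 bytes
block = np.zeros(34, dtype=np.uint8)
block[0:2] = np.frombuffer(np.float16(0.5).tobytes(), dtype=np.uint8)

delta = block[0:2].view(np.uint16)  # same memory, reinterpreted as one uint16
delta.byteswap(inplace=True)        # flips the delta's byte order in place
# block[2:34] (the int8 quants) is deliberately left untouched
```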
diff --git a/gguf-py/scripts/gguf-dump.py b/gguf-py/scripts/gguf-dump.py
index dbf891508..1a37a7b91 100755
--- a/gguf-py/scripts/gguf-dump.py
+++ b/gguf-py/scripts/gguf-dump.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import argparse
import os
import sys
@@ -15,6 +16,8 @@ if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent /
from gguf import GGUFReader, GGUFValueType # noqa: E402
+logger = logging.getLogger("gguf-dump")
+
def get_file_host_endian(reader: GGUFReader) -> tuple[str, str]:
host_endian = 'LITTLE' if np.uint32(1) == np.uint32(1).newbyteorder("<") else 'BIG'
@@ -29,8 +32,8 @@ def get_file_host_endian(reader: GGUFReader) -> tuple[str, str]:
# please see the comments in the modify_gguf.py example.
def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
host_endian, file_endian = get_file_host_endian(reader)
- print(f'* File is {file_endian} endian, script is running on a {host_endian} endian host.')
- print(f'\n* Dumping {len(reader.fields)} key/value pair(s)')
+ print(f'* File is {file_endian} endian, script is running on a {host_endian} endian host.') # noqa: NP100
+ print(f'* Dumping {len(reader.fields)} key/value pair(s)') # noqa: NP100
for n, field in enumerate(reader.fields.values(), 1):
if not field.types:
pretty_type = 'N/A'
@@ -39,20 +42,21 @@ def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
pretty_type = '[' * nest_count + str(field.types[-1].name) + ']' * nest_count
else:
pretty_type = str(field.types[-1].name)
- print(f' {n:5}: {pretty_type:10} | {len(field.data):8} | {field.name}', end = '')
+
+ log_message = f' {n:5}: {pretty_type:10} | {len(field.data):8} | {field.name}'
if len(field.types) == 1:
curr_type = field.types[0]
if curr_type == GGUFValueType.STRING:
- print(' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60])), end = '')
+ log_message += ' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf-8')[:60]))
elif field.types[0] in reader.gguf_scalar_to_np:
- print(' = {0}'.format(field.parts[-1][0]), end = '')
- print()
+ log_message += ' = {0}'.format(field.parts[-1][0])
+ print(log_message) # noqa: NP100
if args.no_tensors:
return
- print(f'\n* Dumping {len(reader.tensors)} tensor(s)')
+ print(f'* Dumping {len(reader.tensors)} tensor(s)') # noqa: NP100
for n, tensor in enumerate(reader.tensors, 1):
prettydims = ', '.join('{0:5}'.format(d) for d in list(tensor.shape) + [1] * (4 - len(tensor.shape)))
- print(f' {n:5}: {tensor.n_elements:10} | {prettydims} | {tensor.tensor_type.name:7} | {tensor.name}')
+ print(f' {n:5}: {tensor.n_elements:10} | {prettydims} | {tensor.tensor_type.name:7} | {tensor.name}') # noqa: NP100
def dump_metadata_json(reader: GGUFReader, args: argparse.Namespace) -> None:
@@ -103,10 +107,17 @@ def main() -> None:
parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata")
parser.add_argument("--json", action="store_true", help="Produce JSON output")
parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+
args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
if not args.json:
- print(f'* Loading: {args.model}')
+ logger.info(f'* Loading: {args.model}')
+
reader = GGUFReader(args.model, 'r')
+
if args.json:
dump_metadata_json(reader, args)
else:
dump_metadata(reader, args)
diff --git a/gguf-py/scripts/gguf-new-metadata.py b/gguf-py/scripts/gguf-new-metadata.py
old mode 100644
new mode 100755
index 3444ab418..63d3c5d8f
--- a/gguf-py/scripts/gguf-new-metadata.py
+++ b/gguf-py/scripts/gguf-new-metadata.py
@@ -7,7 +7,8 @@ import json
from pathlib import Path
import numpy as np
-from typing import Any, Mapping, Sequence
+from tqdm import tqdm
+from typing import Any, Sequence, NamedTuple
# Necessary to load the local gguf package
if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists():
@@ -18,6 +19,12 @@ import gguf
logger = logging.getLogger("gguf-new-metadata")
+class MetadataDetails(NamedTuple):
+ type: gguf.GGUFValueType
+ value: Any
+ description: str = ''
+
+
def get_byteorder(reader: gguf.GGUFReader) -> gguf.GGUFEndian:
if np.uint32(1) == np.uint32(1).newbyteorder("<"):
# Host is little endian
@@ -34,7 +41,7 @@ def get_byteorder(reader: gguf.GGUFReader) -> gguf.GGUFEndian:
return host_endian
-def decode_field(field: gguf.ReaderField) -> Any:
+def decode_field(field: gguf.ReaderField | None) -> Any:
if field and field.types:
main_type = field.types[0]
@@ -42,11 +49,11 @@ def decode_field(field: gguf.ReaderField) -> Any:
sub_type = field.types[-1]
if sub_type == gguf.GGUFValueType.STRING:
- return [str(bytes(field.parts[idx]), encoding='utf8') for idx in field.data]
+ return [str(bytes(field.parts[idx]), encoding='utf-8') for idx in field.data]
else:
return [pv for idx in field.data for pv in field.parts[idx].tolist()]
if main_type == gguf.GGUFValueType.STRING:
- return str(bytes(field.parts[-1]), encoding='utf8')
+ return str(bytes(field.parts[-1]), encoding='utf-8')
else:
return field.parts[-1][0]
@@ -59,7 +66,16 @@ def get_field_data(reader: gguf.GGUFReader, key: str) -> Any:
return decode_field(field)
-def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: Mapping[str, str], remove_metadata: Sequence[str]) -> None:
+def find_token(token_list: Sequence[str], token: str) -> Sequence[int]:
+ token_ids = [index for index, value in enumerate(token_list) if value == token]
+
+ if len(token_ids) == 0:
+ raise LookupError(f'Unable to find "{token}" in token list!')
+
+ return token_ids
+
+
+def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: dict[str, MetadataDetails], remove_metadata: Sequence[str]) -> None:
for field in reader.fields.values():
# Suppress virtual fields and fields written by GGUFWriter
if field.name == gguf.Keys.General.ARCHITECTURE or field.name.startswith('GGUF.'):
@@ -75,54 +91,64 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new
logger.debug(f'Removing {field.name}')
continue
- old_val = decode_field(field)
+ old_val = MetadataDetails(field.types[0], decode_field(field))
val = new_metadata.get(field.name, old_val)
if field.name in new_metadata:
- logger.debug(f'Modifying {field.name}: "{old_val}" -> "{val}"')
+ logger.debug(f'Modifying {field.name}: "{old_val.value}" -> "{val.value}" {val.description}')
del new_metadata[field.name]
- elif val is not None:
+ elif val.value is not None:
logger.debug(f'Copying {field.name}')
- if val is not None:
+ if val.value is not None:
writer.add_key(field.name)
- writer.add_val(val, field.types[0])
+ writer.add_val(val.value, val.type)
if gguf.Keys.Tokenizer.CHAT_TEMPLATE in new_metadata:
logger.debug('Adding chat template(s)')
- writer.add_chat_template(new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE])
+ writer.add_chat_template(new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE].value)
del new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE]
- # TODO: Support other types than string?
for key, val in new_metadata.items():
- logger.debug(f'Adding {key}: {val}')
+ logger.debug(f'Adding {key}: "{val.value}" {val.description}')
writer.add_key(key)
- writer.add_val(val, gguf.GGUFValueType.STRING)
+ writer.add_val(val.value, val.type)
+
+ total_bytes = 0
for tensor in reader.tensors:
+ total_bytes += tensor.n_bytes
# Dimensions are written in reverse order, so flip them first
- shape = np.flipud(tensor.shape)
+ shape = np.flipud(tensor.shape).tolist()
writer.add_tensor_info(tensor.name, shape, tensor.data.dtype, tensor.data.nbytes, tensor.tensor_type)
+ bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True)
+
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.write_ti_data_to_file()
for tensor in reader.tensors:
writer.write_tensor_data(tensor.data)
+ bar.update(tensor.n_bytes)
writer.close()
def main() -> None:
+ tokenizer_metadata = (getattr(gguf.Keys.Tokenizer, n) for n in gguf.Keys.Tokenizer.__dict__.keys() if not n.startswith('_'))
+ token_names = dict((n.split('.')[-1][:-len('_token_id')], n) for n in tokenizer_metadata if n.endswith('_token_id'))
+
parser = argparse.ArgumentParser(description="Make a copy of a GGUF file with new metadata")
parser.add_argument("input", type=Path, help="GGUF format model input filename")
parser.add_argument("output", type=Path, help="GGUF format model output filename")
- parser.add_argument("--general-name", type=str, help="The models general.name")
- parser.add_argument("--general-description", type=str, help="The models general.description")
- parser.add_argument("--chat-template", type=str, help="Chat template string (or JSON string containing templates)")
- parser.add_argument("--chat-template-config", type=Path, help="Config file (tokenizer_config.json) containing chat template(s)")
- parser.add_argument("--remove-metadata", action="append", type=str, help="Remove metadata (by key name) from output model")
+ parser.add_argument("--general-name", type=str, help="The models general.name", metavar='"name"')
+ parser.add_argument("--general-description", type=str, help="The models general.description", metavar='"Description ..."')
+ parser.add_argument("--chat-template", type=str, help="Chat template string (or JSON string containing templates)", metavar='"{% ... %} ..."')
+ parser.add_argument("--chat-template-config", type=Path, help="Config file containing chat template(s)", metavar='tokenizer_config.json')
+ parser.add_argument("--remove-metadata", action="append", type=str, help="Remove metadata (by key name) from output model", metavar='general.url')
+ parser.add_argument("--special-token", action="append", type=str, help="Special token by value", nargs=2, metavar=(' | '.join(token_names.keys()), '""'))
+ parser.add_argument("--special-token-by-id", action="append", type=str, help="Special token by id", nargs=2, metavar=(' | '.join(token_names.keys()), '0'))
parser.add_argument("--force", action="store_true", help="Bypass warnings without confirmation")
parser.add_argument("--verbose", action="store_true", help="Increase output verbosity")
args = parser.parse_args(None if len(sys.argv) > 2 else ["--help"])
@@ -133,20 +159,20 @@ def main() -> None:
remove_metadata = args.remove_metadata or []
if args.general_name:
- new_metadata[gguf.Keys.General.NAME] = args.general_name
+ new_metadata[gguf.Keys.General.NAME] = MetadataDetails(gguf.GGUFValueType.STRING, args.general_name)
if args.general_description:
- new_metadata[gguf.Keys.General.DESCRIPTION] = args.general_description
+ new_metadata[gguf.Keys.General.DESCRIPTION] = MetadataDetails(gguf.GGUFValueType.STRING, args.general_description)
if args.chat_template:
- new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE] = json.loads(args.chat_template) if args.chat_template.startswith('[') else args.chat_template
+ new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE] = MetadataDetails(gguf.GGUFValueType.STRING, json.loads(args.chat_template) if args.chat_template.startswith('[') else args.chat_template)
if args.chat_template_config:
with open(args.chat_template_config, 'r') as fp:
config = json.load(fp)
template = config.get('chat_template')
if template:
- new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE] = template
+ new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE] = MetadataDetails(gguf.GGUFValueType.STRING, template)
if remove_metadata:
logger.warning('*** Warning *** Warning *** Warning **')
@@ -166,6 +192,32 @@ def main() -> None:
arch = get_field_data(reader, gguf.Keys.General.ARCHITECTURE)
endianess = get_byteorder(reader)
+ token_list = get_field_data(reader, gguf.Keys.Tokenizer.LIST) or []
+
+ for name, token in args.special_token or []:
+ if name not in token_names:
+ logger.warning(f'Unknown special token "{name}", ignoring...')
+ else:
+ ids = find_token(token_list, token)
+ new_metadata[token_names[name]] = MetadataDetails(gguf.GGUFValueType.UINT32, ids[0], f'= {token}')
+
+ if len(ids) > 1:
+ logger.warning(f'Multiple "{token}" tokens found, choosing ID {ids[0]}, use --special-token-by-id if you want another:')
+ logger.warning(', '.join(str(i) for i in ids))
+
+ for name, id_string in args.special_token_by_id or []:
+ if name not in token_names:
+ logger.warning(f'Unknown special token "{name}", ignoring...')
+ elif not id_string.isdecimal():
+ raise LookupError(f'Token ID "{id_string}" is not a valid ID!')
+ else:
+ id_int = int(id_string)
+
+ if id_int >= 0 and id_int < len(token_list):
+ new_metadata[token_names[name]] = MetadataDetails(gguf.GGUFValueType.UINT32, id_int, f'= {token_list[id_int]}')
+ else:
+ raise LookupError(f'Token ID {id_int} is not within token list!')
+
if os.path.isfile(args.output) and not args.force:
logger.warning('*** Warning *** Warning *** Warning **')
logger.warning(f'* The "{args.output}" GGUF file already exists, it will be overwritten!')
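`find_token` returns every index at which the literal token text occurs, and the CLI picks the first match (warning when there are several). A self-contained toy run under assumed inputs, mirroring the helper added above:

```python
from typing import Sequence


def find_token(token_list: Sequence[str], token: str) -> Sequence[int]:
    # same behaviour as the helper added in the diff above
    token_ids = [index for index, value in enumerate(token_list) if value == token]
    if len(token_ids) == 0:
        raise LookupError(f'Unable to find "{token}" in token list!')
    return token_ids


token_list = ["<unk>", "<s>", "</s>", "</s>"]  # toy vocabulary with a duplicate
ids = find_token(token_list, "</s>")           # -> [2, 3]
print(ids[0])  # 2 is chosen; --special-token-by-id can force 3  # noqa: NP100
```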
diff --git a/gguf-py/scripts/gguf-set-metadata.py b/gguf-py/scripts/gguf-set-metadata.py
index 3ebdfa898..e35b651b8 100755
--- a/gguf-py/scripts/gguf-set-metadata.py
+++ b/gguf-py/scripts/gguf-set-metadata.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python3
+import logging
import argparse
import os
import sys
@@ -10,6 +11,8 @@ if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent /
from gguf import GGUFReader # noqa: E402
+logger = logging.getLogger("gguf-set-metadata")
+
def minimal_example(filename: str) -> None:
reader = GGUFReader(filename, 'r+')
@@ -41,36 +44,33 @@ def minimal_example(filename: str) -> None:
def set_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
field = reader.get_field(args.key)
if field is None:
- print(f'! Field {repr(args.key)} not found', file = sys.stderr)
+ logger.error(f'! Field {repr(args.key)} not found')
sys.exit(1)
# Note that field.types is a list of types. This is because the GGUF
# format supports arrays. For example, an array of UINT32 would
# look like [GGUFValueType.ARRAY, GGUFValueType.UINT32]
handler = reader.gguf_scalar_to_np.get(field.types[0]) if field.types else None
if handler is None:
- print(
- f'! This tool only supports changing simple values, {repr(args.key)} has unsupported type {field.types}',
- file = sys.stderr,
- )
+ logger.error(f'! This tool only supports changing simple values, {repr(args.key)} has unsupported type {field.types}')
sys.exit(1)
current_value = field.parts[field.data[0]][0]
new_value = handler(args.value)
- print(f'* Preparing to change field {repr(args.key)} from {current_value} to {new_value}')
+ logger.info(f'* Preparing to change field {repr(args.key)} from {current_value} to {new_value}')
if current_value == new_value:
- print(f'- Key {repr(args.key)} already set to requested value {current_value}')
+ logger.info(f'- Key {repr(args.key)} already set to requested value {current_value}')
sys.exit(0)
if args.dry_run:
sys.exit(0)
if not args.force:
- print('*** Warning *** Warning *** Warning **')
- print('* Changing fields in a GGUF file can make it unusable. Proceed at your own risk.')
- print('* Enter exactly YES if you are positive you want to proceed:')
+ logger.warning('*** Warning *** Warning *** Warning **')
+ logger.warning('* Changing fields in a GGUF file can make it unusable. Proceed at your own risk.')
+ logger.warning('* Enter exactly YES if you are positive you want to proceed:')
response = input('YES, I am sure> ')
if response != 'YES':
- print("You didn't enter YES. Okay then, see ya!")
+ logger.info("You didn't enter YES. Okay then, see ya!")
sys.exit(0)
field.parts[field.data[0]][0] = new_value
- print('* Field changed. Successful completion.')
+ logger.info('* Field changed. Successful completion.')
def main() -> None:
@@ -80,8 +80,13 @@ def main() -> None:
parser.add_argument("value", type=str, help="Metadata value to set")
parser.add_argument("--dry-run", action="store_true", help="Don't actually change anything")
parser.add_argument("--force", action="store_true", help="Change the field without confirmation")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+
args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
- print(f'* Loading: {args.model}')
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+ logger.info(f'* Loading: {args.model}')
reader = GGUFReader(args.model, 'r' if args.dry_run else 'r+')
set_metadata(reader, args)
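Because `GGUFReader` memory-maps the file, assigning through `field.parts[field.data[0]][0]` is what edits the file in place when opened with `r+`. A minimal sketch with a hypothetical path and an assumed key:

```python
from gguf import GGUFReader

reader = GGUFReader("model.gguf", "r+")  # hypothetical path; 'r+' maps the file writably
field = reader.get_field("tokenizer.ggml.bos_token_id")  # assumed scalar key
if field is not None:
    field.parts[field.data[0]][0] = 1    # writes through the memmap immediately
```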
diff --git a/grammars/README.md b/grammars/README.md
index c924e8d46..2b8384d9d 100644
--- a/grammars/README.md
+++ b/grammars/README.md
@@ -51,7 +51,7 @@ single-line ::= [^\n]+ "\n"`
## Sequences and Alternatives
-The order of symbols in a sequence matter. For example, in `"1. " move " " move "\n"`, the `"1. "` must come before the first `move`, etc.
+The order of symbols in a sequence matters. For example, in `"1. " move " " move "\n"`, the `"1. "` must come before the first `move`, etc.
Alternatives, denoted by `|`, give different sequences that are acceptable. For example, in `move ::= pawn | nonpawn | castle`, `move` can be a `pawn` move, a `nonpawn` move, or a `castle`.
diff --git a/llama.cpp b/llama.cpp
index ee5a3226e..07e4bf9ea 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2386,7 +2386,7 @@ static bool llama_kv_cache_init(
cache.recurrent = model.arch == LLM_ARCH_MAMBA;
cache.v_trans = !cparams.flash_attn;
- // TODO: support mixed reccurent Transformer architectues
+ // TODO: support mixed recurrent Transformer architectures
// NOTE: (!a || b) is a logical implication (a -> b)
GGML_ASSERT(!cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_s());
GGML_ASSERT(!cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_s());
@@ -3202,6 +3202,7 @@ struct llama_model_loader {
switch (type_max) {
case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
+ case GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break;
case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
@@ -3693,6 +3694,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
switch (ftype) {
case LLAMA_FTYPE_ALL_F32: return "all F32";
case LLAMA_FTYPE_MOSTLY_F16: return "F16";
+ case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
@@ -4425,6 +4427,21 @@ static void llm_load_vocab(
} else if (
tokenizer_pre == "gpt-2") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
+ } else if (
+ tokenizer_pre == "refact") {
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
+ } else if (
+ tokenizer_pre == "command-r") {
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
+ } else if (
+ tokenizer_pre == "qwen2") {
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
+ } else if (
+ tokenizer_pre == "olmo") {
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
+ } else if (
+ tokenizer_pre == "dbrx") {
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
} else {
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
}
@@ -6211,6 +6228,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
|| !(
model.ftype == LLAMA_FTYPE_ALL_F32 ||
model.ftype == LLAMA_FTYPE_MOSTLY_F16 ||
+ model.ftype == LLAMA_FTYPE_MOSTLY_BF16 ||
model.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
)
@@ -12195,7 +12213,7 @@ static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id
static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
GGML_ASSERT(llama_is_byte_token(vocab, id));
- const auto& token_data = vocab.id_to_token.at(id);
+ const auto & token_data = vocab.id_to_token.at(id);
switch (llama_vocab_get_type(vocab)) {
case LLAMA_VOCAB_TYPE_SPM: {
auto buf = token_data.text.substr(3, 2);
@@ -12431,6 +12449,7 @@ struct llm_tokenizer_bpe {
case LLAMA_VOCAB_TYPE_BPE:
switch (vocab.type_pre) {
case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
+ case LLAMA_VOCAB_PRE_TYPE_DBRX:
word_collection = unicode_regex_split(text, {
// original regex from tokenizer.json
//"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
@@ -12455,14 +12474,13 @@ struct llm_tokenizer_bpe {
"\\s?\\p{L}+",
"\\s?\\p{P}+",
"[一-龥ࠀ-一가-]+",
- "\\p{N}+",
+ "\\p{N}",
});
break;
case LLAMA_VOCAB_PRE_TYPE_FALCON:
word_collection = unicode_regex_split(text, {
"[\\p{P}\\$\\+<=>\\^~\\|]+",
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
- "\\p{N}+",
"[0-9][0-9][0-9]",
});
break;
@@ -12478,11 +12496,26 @@ struct llm_tokenizer_bpe {
});
break;
case LLAMA_VOCAB_PRE_TYPE_STARCODER:
+ case LLAMA_VOCAB_PRE_TYPE_REFACT:
+ case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
+ word_collection = unicode_regex_split(text, {
+ "\\p{N}",
+ "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
+ });
+ break;
case LLAMA_VOCAB_PRE_TYPE_GPT2:
+ case LLAMA_VOCAB_PRE_TYPE_OLMO:
word_collection = unicode_regex_split(text, {
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
});
break;
+ case LLAMA_VOCAB_PRE_TYPE_QWEN2:
+ word_collection = unicode_regex_split(text, {
+ // original regex from tokenizer.json
+ // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
+ "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+ });
+ break;
default:
// default regex for BPE tokenization pre-processing
word_collection = unicode_regex_split(text, {
@@ -12698,7 +12731,7 @@ struct llm_tokenizer_wpm {
continue;
}
code = unicode_tolower(code);
- if (type == CODEPOINT_TYPE_WHITESPACE) {
+ if (type == CODEPOINT_TYPE_SEPARATOR) {
code = ' ';
}
std::string s = unicode_cpt_to_utf8(code);
@@ -14385,13 +14418,16 @@ static void llama_tensor_dequantize_internal(
if (qtype.to_float == NULL) {
throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
}
- } else if (tensor->type != GGML_TYPE_F16) {
+ } else if (tensor->type != GGML_TYPE_F16 &&
+ tensor->type != GGML_TYPE_BF16) {
throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
}
if (nthread < 2) {
if (tensor->type == GGML_TYPE_F16) {
ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
+ } else if (tensor->type == GGML_TYPE_BF16) {
+ ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
} else if (ggml_is_quantized(tensor->type)) {
qtype.to_float(tensor->data, f32_output, nelements);
} else {
@@ -14400,7 +14436,14 @@ static void llama_tensor_dequantize_internal(
return;
}
- size_t block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
+ size_t block_size;
+ if (tensor->type == GGML_TYPE_F16 ||
+ tensor->type == GGML_TYPE_BF16) {
+ block_size = 1;
+ } else {
+ block_size = (size_t)ggml_blck_size(tensor->type);
+ }
+
size_t block_size_bytes = ggml_type_size(tensor->type);
GGML_ASSERT(nelements % block_size == 0);
@@ -14419,6 +14462,8 @@ static void llama_tensor_dequantize_internal(
auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
if (typ == GGML_TYPE_F16) {
ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
+ } else if (typ == GGML_TYPE_BF16) {
+ ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
} else {
qtype.to_float(inbuf, outbuf, nels);
}
@@ -14779,6 +14824,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
case LLAMA_FTYPE_MOSTLY_F16: default_type = GGML_TYPE_F16; break;
+ case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
case LLAMA_FTYPE_ALL_F32: default_type = GGML_TYPE_F32; break;
// K-quants
@@ -15716,13 +15762,6 @@ struct llama_context * llama_new_context_with_model(
cparams.flash_attn = false;
}
-#ifdef GGML_USE_HIPBLAS
- if (cparams.flash_attn) {
- LLAMA_LOG_WARN("%s: flash_attn is not yet compatible with HIPBLAS builds - forcing off\n", __func__);
- cparams.flash_attn = false;
- }
-#endif
-
if (params.seed == LLAMA_DEFAULT_SEED) {
params.seed = time(NULL);
}
@@ -17710,9 +17749,10 @@ int32_t llama_tokenize(
static std::string llama_decode_text(const std::string & text) {
std::string decoded_text;
- auto unicode_sequences = unicode_cpts_from_utf8(text);
- for (auto & unicode_sequence : unicode_sequences) {
- decoded_text += unicode_utf8_to_byte(unicode_cpt_to_utf8(unicode_sequence));
+
+ const auto cpts = unicode_cpts_from_utf8(text);
+ for (const auto cpt : cpts) {
+ decoded_text += unicode_utf8_to_byte(unicode_cpt_to_utf8(cpt));
}
return decoded_text;
@@ -18076,7 +18116,7 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
/*.t_eval_ms =*/ 1e-3 * ctx->t_eval_us,
/*.n_sample =*/ std::max(1, ctx->n_sample),
- /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
+ /*.n_p_eval =*/ std::max(0, ctx->n_p_eval),
/*.n_eval =*/ std::max(1, ctx->n_eval),
};
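On the new BF16 path, `ggml_bf16_to_fp32_row` widens each value to float32. Conceptually, bf16 is just the high 16 bits of an IEEE-754 float32, so widening amounts to a 16-bit left shift; a numpy sketch of the idea (not the actual ggml code):

```python
import numpy as np


def bf16_bits_to_fp32(bits: np.ndarray) -> np.ndarray:
    # Place the 16 bf16 bits into the top half of a float32 bit pattern
    return (bits.astype(np.uint32) << 16).view(np.float32)


one = np.array([0x3F80], dtype=np.uint16)   # bf16 bit pattern of 1.0
print(bf16_bits_to_fp32(one))               # [1.]  # noqa: NP100
```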
diff --git a/llama.h b/llama.h
index 059d78f11..0b2e708d0 100644
--- a/llama.h
+++ b/llama.h
@@ -79,6 +79,11 @@ extern "C" {
LLAMA_VOCAB_PRE_TYPE_MPT = 5,
LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
+ LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
+ LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
+ LLAMA_VOCAB_PRE_TYPE_QWEN2 = 10,
+ LLAMA_VOCAB_PRE_TYPE_OLMO = 11,
+ LLAMA_VOCAB_PRE_TYPE_DBRX = 12,
};
// note: these values should be synchronized with ggml_rope
@@ -134,6 +139,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
};
@@ -171,7 +177,7 @@ extern "C" {
bool sorted;
} llama_token_data_array;
- typedef bool (*llama_progress_callback)(float progress, void *ctx);
+ typedef bool (*llama_progress_callback)(float progress, void * user_data);
// Input data for llama_decode
// A llama_batch object can contain input about one or many sequences
diff --git a/models/ggml-vocab-bert-bge.gguf.inp b/models/ggml-vocab-bert-bge.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-bert-bge.gguf.inp
+++ b/models/ggml-vocab-bert-bge.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-bert-bge.gguf.out b/models/ggml-vocab-bert-bge.gguf.out
index 969552e1c..e4a76cdb0 100644
--- a/models/ggml-vocab-bert-bge.gguf.out
+++ b/models/ggml-vocab-bert-bge.gguf.out
@@ -1,3 +1,5 @@
+ 29464 2094 1018 1092 2706
+ 11865 17875
diff --git a/models/ggml-vocab-command-r.gguf b/models/ggml-vocab-command-r.gguf
new file mode 100644
index 000000000..b553eab33
Binary files /dev/null and b/models/ggml-vocab-command-r.gguf differ
diff --git a/models/ggml-vocab-command-r.gguf.inp b/models/ggml-vocab-command-r.gguf.inp
new file mode 100644
index 000000000..0a89107c6
--- /dev/null
+++ b/models/ggml-vocab-command-r.gguf.inp
@@ -0,0 +1,106 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+
+__ggml_vocab_test__
+
+
+
+__ggml_vocab_test__
+
+
+
+
+__ggml_vocab_test__
+
+
+__ggml_vocab_test__
+Hello world
+__ggml_vocab_test__
+ Hello world
+__ggml_vocab_test__
+Hello World
+__ggml_vocab_test__
+ Hello World
+__ggml_vocab_test__
+ Hello World!
+__ggml_vocab_test__
+Hello, world!
+__ggml_vocab_test__
+ Hello, world!
+__ggml_vocab_test__
+ this is 🦙.cpp
+__ggml_vocab_test__
+w048 7tuijk dsdfhu
+__ggml_vocab_test__
+нещо на Български
+__ggml_vocab_test__
+កាន់តែពិសេសអាចខលចេញ
+__ggml_vocab_test__
+🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)
+__ggml_vocab_test__
+Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+ Hello
+__ggml_vocab_test__
+ (
+__ggml_vocab_test__
+
+ =
+__ggml_vocab_test__
+' era
+__ggml_vocab_test__
+Hello, y'all! How are you 😁 ?我想在apple工作1314151天~
+__ggml_vocab_test__
+3
+__ggml_vocab_test__
+33
+__ggml_vocab_test__
+333
+__ggml_vocab_test__
+3333
+__ggml_vocab_test__
+33333
+__ggml_vocab_test__
+333333
+__ggml_vocab_test__
+3333333
+__ggml_vocab_test__
+33333333
+__ggml_vocab_test__
+333333333
+__ggml_vocab_test__
+
+
+
+
+
+
+
+
+
+
+
+🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL
+__ggml_vocab_test__
diff --git a/models/ggml-vocab-command-r.gguf.out b/models/ggml-vocab-command-r.gguf.out
new file mode 100644
index 000000000..cc4277daa
--- /dev/null
+++ b/models/ggml-vocab-command-r.gguf.out
@@ -0,0 +1,43 @@
+ 2536 228 27 228 22957 6983
+ 45 193433
+
+ 228
+ 1667
+ 1742
+ 205
+ 206
+ 2126
+ 11516
+ 34777
+ 28339 3845
+ 46609 3845
+ 28339 3930
+ 46609 3930
+ 46609 3930 8
+ 28339 19 3845 8
+ 46609 19 3845 8
+ 2075 1801 11254 107 255 21 19317
+ 94 23 27 31 228 30 21213 20752 39267 6405 9980
+ 4929 40071 2196 3236 8750 1764 37097 41168
+ 38111 230 174833 38111 249 86325 241 38111 245 86325 232 38111 252 38111 123 38111 261 165 24629 38111 261 38111 103 174833 38111 235 38111 231 38111 257 38111 235 165 24629 38111 239
+ 2226 256 230 1737 18258 16 80503 122 35927 2226 242 112 57462 1737 54457 223165 106230 2096 16 48389 1737 10203 109160 1875 2222 2517 3342 12523 16
+ 28339
+ 46609
+ 228 46609
+ 1667 46609
+ 1742 46609
+ 1742 46609 1856 46609
+ 1737
+ 206 1857
+ 14 4515
+ 28339 19 1770 14 1954 8 4070 1955 1933 80503 231 5691 12081 13336 2648 29325 14315 24 26 24 27 24 28 24 5123 18372
+ 26
+ 26 26
+ 26 26 26
+ 26 26 26 26
+ 26 26 26 26 26
+ 26 26 26 26 26 26
+ 26 26 26 26 26 26 26
+ 26 26 26 26 26 26 26 26
+ 26 26 26 26 26 26 26 26 26
+ 127731 51628 205 57788 18494 97469 126134 206 2226 256 230 1737 18258 16 80503 122 35927 2226 242 112 57462 1737 54457 223165 106230 2096 16 48389 11254 107 255 2226 107 255 228 26 228 26 26 228 26 26 26 228 26 26 26 26 228 26 26 26 26 26 228 26 26 26 26 26 26 228 26 26 26 26 26 26 26 228 26 26 26 26 26 26 26 26 228 26 21 26 228 26 2271 26 228 26 3834 26 182018 230 174833 38111 249 86325 241 38111 245 86325 232 38111 252 38111 123 38111 261 165 24629 38111 261 38111 103 174833 38111 235 188568 231 5691 12081 13336 2648 29325 14315 24 26 24 27 24 28 24 5123 18372 8391 158343 3512 40071 2196 3236 8750 1764 37097 41168 29721 32797 25646 3802 4975 4975 116167 57178 10251 154048 27292 1767 5125 2632 2155 91 2378 1919 1914 2782 19 2155 3354 1933 5470 38 2155 52 2068 5470 1767 4961 3059 1894 19 2155 43 1933 3026 2725 23186 38 2930 14 20676 1671 14 83 51
diff --git a/models/ggml-vocab-deepseek-coder.gguf.inp b/models/ggml-vocab-deepseek-coder.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-deepseek-coder.gguf.inp
+++ b/models/ggml-vocab-deepseek-coder.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-deepseek-coder.gguf.out b/models/ggml-vocab-deepseek-coder.gguf.out
index 8ef585c78..9ccc560d6 100644
--- a/models/ggml-vocab-deepseek-coder.gguf.out
+++ b/models/ggml-vocab-deepseek-coder.gguf.out
@@ -1,3 +1,5 @@
+ 1050 207 19 207 19192 4217
+ 37 32009 71 6247
207
243
diff --git a/models/ggml-vocab-deepseek-llm.gguf.inp b/models/ggml-vocab-deepseek-llm.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-deepseek-llm.gguf.inp
+++ b/models/ggml-vocab-deepseek-llm.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-deepseek-llm.gguf.out b/models/ggml-vocab-deepseek-llm.gguf.out
index 0ea9d66e3..fd94b896d 100644
--- a/models/ggml-vocab-deepseek-llm.gguf.out
+++ b/models/ggml-vocab-deepseek-llm.gguf.out
@@ -1,3 +1,5 @@
+ 1052 207 19 207 19109 4223
+ 37 100014 71 6245
207
243
diff --git a/models/ggml-vocab-falcon.gguf.inp b/models/ggml-vocab-falcon.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-falcon.gguf.inp
+++ b/models/ggml-vocab-falcon.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-falcon.gguf.out b/models/ggml-vocab-falcon.gguf.out
index cb8da7b1a..209b04cda 100644
--- a/models/ggml-vocab-falcon.gguf.out
+++ b/models/ggml-vocab-falcon.gguf.out
@@ -1,3 +1,5 @@
+ 878 204 31 3068 133 2137
+ 28611 132 30042
204
258
diff --git a/models/ggml-vocab-gpt-2.gguf.inp b/models/ggml-vocab-gpt-2.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-gpt-2.gguf.inp
+++ b/models/ggml-vocab-gpt-2.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-gpt-2.gguf.out b/models/ggml-vocab-gpt-2.gguf.out
index 9986f38e4..78430f0d3 100644
--- a/models/ggml-vocab-gpt-2.gguf.out
+++ b/models/ggml-vocab-gpt-2.gguf.out
@@ -1,3 +1,5 @@
+ 798 604 25208 1933
+ 37 9116 71 11751
220
220 220
diff --git a/models/ggml-vocab-llama-bpe.gguf.inp b/models/ggml-vocab-llama-bpe.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-llama-bpe.gguf.inp
+++ b/models/ggml-vocab-llama-bpe.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-llama-bpe.gguf.out b/models/ggml-vocab-llama-bpe.gguf.out
index 4d903e1cd..1f00e3812 100644
--- a/models/ggml-vocab-llama-bpe.gguf.out
+++ b/models/ggml-vocab-llama-bpe.gguf.out
@@ -1,3 +1,5 @@
+ 1142 220 19 220 27154 4038
+ 37 51853 261
220
256
diff --git a/models/ggml-vocab-llama-spm.gguf.inp b/models/ggml-vocab-llama-spm.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-llama-spm.gguf.inp
+++ b/models/ggml-vocab-llama-spm.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-llama-spm.gguf.out b/models/ggml-vocab-llama-spm.gguf.out
index 15d00b106..9c3327cb5 100644
--- a/models/ggml-vocab-llama-spm.gguf.out
+++ b/models/ggml-vocab-llama-spm.gguf.out
@@ -1,3 +1,5 @@
+ 474 287 29871 29946 29871 30226 7378
+ 383 4000 261
259
1678
diff --git a/models/ggml-vocab-mpt.gguf.inp b/models/ggml-vocab-mpt.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-mpt.gguf.inp
+++ b/models/ggml-vocab-mpt.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-mpt.gguf.out b/models/ggml-vocab-mpt.gguf.out
index 1f4b0eb3a..d8d0fe909 100644
--- a/models/ggml-vocab-mpt.gguf.out
+++ b/models/ggml-vocab-mpt.gguf.out
@@ -1,3 +1,5 @@
+ 728 577 24142 2607
+ 39 26288 6554
209
50276
diff --git a/models/ggml-vocab-phi-3.gguf b/models/ggml-vocab-phi-3.gguf
index 72fdb409c..f8022a385 100644
Binary files a/models/ggml-vocab-phi-3.gguf and b/models/ggml-vocab-phi-3.gguf differ
diff --git a/models/ggml-vocab-phi-3.gguf.inp b/models/ggml-vocab-phi-3.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-phi-3.gguf.inp
+++ b/models/ggml-vocab-phi-3.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-phi-3.gguf.out b/models/ggml-vocab-phi-3.gguf.out
index 15d00b106..9c3327cb5 100644
--- a/models/ggml-vocab-phi-3.gguf.out
+++ b/models/ggml-vocab-phi-3.gguf.out
@@ -1,3 +1,5 @@
+ 474 287 29871 29946 29871 30226 7378
+ 383 4000 261
259
1678
diff --git a/models/ggml-vocab-qwen2.gguf b/models/ggml-vocab-qwen2.gguf
new file mode 100644
index 000000000..541e475bc
Binary files /dev/null and b/models/ggml-vocab-qwen2.gguf differ
diff --git a/models/ggml-vocab-qwen2.gguf.inp b/models/ggml-vocab-qwen2.gguf.inp
new file mode 100644
index 000000000..0a89107c6
--- /dev/null
+++ b/models/ggml-vocab-qwen2.gguf.inp
@@ -0,0 +1,106 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+
+__ggml_vocab_test__
+
+
+
+__ggml_vocab_test__
+
+
+
+
+__ggml_vocab_test__
+
+
+__ggml_vocab_test__
+Hello world
+__ggml_vocab_test__
+ Hello world
+__ggml_vocab_test__
+Hello World
+__ggml_vocab_test__
+ Hello World
+__ggml_vocab_test__
+ Hello World!
+__ggml_vocab_test__
+Hello, world!
+__ggml_vocab_test__
+ Hello, world!
+__ggml_vocab_test__
+ this is 🦙.cpp
+__ggml_vocab_test__
+w048 7tuijk dsdfhu
+__ggml_vocab_test__
+нещо на Български
+__ggml_vocab_test__
+កាន់តែពិសេសអាចខលចេញ
+__ggml_vocab_test__
+🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)
+__ggml_vocab_test__
+Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+ Hello
+__ggml_vocab_test__
+ (
+__ggml_vocab_test__
+
+ =
+__ggml_vocab_test__
+' era
+__ggml_vocab_test__
+Hello, y'all! How are you 😁 ?我想在apple工作1314151天~
+__ggml_vocab_test__
+3
+__ggml_vocab_test__
+33
+__ggml_vocab_test__
+333
+__ggml_vocab_test__
+3333
+__ggml_vocab_test__
+33333
+__ggml_vocab_test__
+333333
+__ggml_vocab_test__
+3333333
+__ggml_vocab_test__
+33333333
+__ggml_vocab_test__
+333333333
+__ggml_vocab_test__
+
+
+
+
+
+
+
+
+
+
+
+🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL
+__ggml_vocab_test__
diff --git a/models/ggml-vocab-qwen2.gguf.out b/models/ggml-vocab-qwen2.gguf.out
new file mode 100644
index 000000000..401a510e8
--- /dev/null
+++ b/models/ggml-vocab-qwen2.gguf.out
@@ -0,0 +1,43 @@
+ 1122 220 19 220 26062 3951
+ 37 50753 261
+
+ 220
+ 256
+ 262
+ 197
+ 198
+ 271
+ 1406
+ 1572
+ 9707 1879
+ 21927 1879
+ 9707 4337
+ 21927 4337
+ 21927 4337 0
+ 9707 11 1879 0
+ 21927 11 1879 0
+ 419 374 11162 99 247 13 10821
+ 86 15 19 23 220 22 83 1963 41808 11472 2940 16739
+ 78762 14144 1456 13073 63471 33594 3038 133178 79012
+ 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 147805 148301 147270 44258 223 146848
+ 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 320 3243 42365 429 702 1181 1828 3950 8
+ 9707
+ 21927
+ 220 21927
+ 256 21927
+ 262 21927
+ 262 21927 198 262 21927
+ 320
+ 198 284
+ 6 11385
+ 9707 11 379 64848 0 2585 525 498 26525 223 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216
+ 18
+ 18 18
+ 18 18 18
+ 18 18 18 18
+ 18 18 18 18 18
+ 18 18 18 18 18 18
+ 18 18 18 18 18 18 18
+ 18 18 18 18 18 18 18 18
+ 18 18 18 18 18 18 18 18 18
+ 198 4710 14731 65497 7847 1572 2303 78672 10947 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 11162 99 247 149955 220 18 220 18 18 220 18 18 18 220 18 18 18 18 220 18 18 18 18 18 220 18 18 18 18 18 18 220 18 18 18 18 18 18 18 220 18 18 18 18 18 18 18 18 220 18 13 18 220 18 496 18 220 18 1112 18 220 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 144534 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216 55460 53237 18658 14144 1456 13073 63471 33594 3038 133178 79012 3355 4605 4605 13874 13874 73594 3014 3014 28149 17085 2928 26610 7646 358 3003 1012 364 83 813 566 594 1052 11 364 787 498 2704 30 364 44 537 2704 358 3278 1281 432 11 364 35 498 1075 1045 15243 30 1205 6 42612 264 63866 43
diff --git a/models/ggml-vocab-refact.gguf b/models/ggml-vocab-refact.gguf
index 8f26cfb76..52afcf01a 100644
Binary files a/models/ggml-vocab-refact.gguf and b/models/ggml-vocab-refact.gguf differ
diff --git a/models/ggml-vocab-refact.gguf.inp b/models/ggml-vocab-refact.gguf.inp
new file mode 100644
index 000000000..0a89107c6
--- /dev/null
+++ b/models/ggml-vocab-refact.gguf.inp
@@ -0,0 +1,106 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+__ggml_vocab_test__
+
+
+__ggml_vocab_test__
+
+
+
+__ggml_vocab_test__
+
+
+
+
+__ggml_vocab_test__
+
+
+__ggml_vocab_test__
+Hello world
+__ggml_vocab_test__
+ Hello world
+__ggml_vocab_test__
+Hello World
+__ggml_vocab_test__
+ Hello World
+__ggml_vocab_test__
+ Hello World!
+__ggml_vocab_test__
+Hello, world!
+__ggml_vocab_test__
+ Hello, world!
+__ggml_vocab_test__
+ this is 🦙.cpp
+__ggml_vocab_test__
+w048 7tuijk dsdfhu
+__ggml_vocab_test__
+нещо на Български
+__ggml_vocab_test__
+កាន់តែពិសេសអាចខលចេញ
+__ggml_vocab_test__
+🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)
+__ggml_vocab_test__
+Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+__ggml_vocab_test__
+ Hello
+ Hello
+__ggml_vocab_test__
+ (
+__ggml_vocab_test__
+
+ =
+__ggml_vocab_test__
+' era
+__ggml_vocab_test__
+Hello, y'all! How are you 😁 ?我想在apple工作1314151天~
+__ggml_vocab_test__
+3
+__ggml_vocab_test__
+33
+__ggml_vocab_test__
+333
+__ggml_vocab_test__
+3333
+__ggml_vocab_test__
+33333
+__ggml_vocab_test__
+333333
+__ggml_vocab_test__
+3333333
+__ggml_vocab_test__
+33333333
+__ggml_vocab_test__
+333333333
+__ggml_vocab_test__
+
+
+
+
+
+
+
+
+
+
+
+🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL
+__ggml_vocab_test__
diff --git a/models/ggml-vocab-refact.gguf.out b/models/ggml-vocab-refact.gguf.out
new file mode 100644
index 000000000..06b15c090
--- /dev/null
+++ b/models/ggml-vocab-refact.gguf.out
@@ -0,0 +1,43 @@
+ 4833 225 38 225 143 140 17723
+ 56 2006 3935 265
+
+ 225
+ 261
+ 264
+ 202
+ 203
+ 478
+ 2831
+ 15773
+ 8279 5788
+ 12000 5788
+ 8279 10896
+ 12000 10896
+ 12000 10896 19
+ 8279 30 5788 19
+ 12000 30 5788 19
+ 458 438 5945 118 252 32 3766
+ 105 34 38 42 225 41 102 1707 12530 10180 1479 8278
+ 39862 8372 1039 9446 40242 13852 2053 8949 12531 1520 10700
+ 14574 227 14574 133 14574 246 30457 238 14574 242 30457 229 14574 249 14574 134 14574 258 30457 228 14574 258 14574 114 14574 133 14574 232 14574 228 14574 254 14574 232 30457 228 14574 236
+ 3807 253 227 308 4382 27 18458 133 46113 44967 123 13868 308 12565 19775 33071 40824 733 27 41889 308 2585 22680 688 1401 2819 4369 2404 27
+ 8279
+ 12000
+ 225 12000
+ 261 12000
+ 264 12000
+ 264 12000 284 12000
+ 308
+ 203 280
+ 25 34666
+ 8279 30 533 25 464 19 4971 884 844 18458 228 1018 4982 13368 2909 9513 17827 35 37 35 38 35 39 35 11873 47838
+ 37
+ 37 37
+ 37 37 37
+ 37 37 37 37
+ 37 37 37 37 37
+ 37 37 37 37 37 37
+ 37 37 37 37 37 37 37
+ 37 37 37 37 37 37 37 37
+ 37 37 37 37 37 37 37 37 37
+ 334 719 8878 202 10885 4222 16104 28570 203 3807 253 227 308 4382 27 18458 133 46113 44967 123 13868 308 12565 19775 33071 40824 733 27 41889 5945 118 252 3807 118 252 225 37 225 37 37 225 37 37 37 225 37 37 37 37 225 37 37 37 37 37 225 37 37 37 37 37 37 225 37 37 37 37 37 37 37 225 37 37 37 37 37 37 37 37 225 37 32 37 225 37 497 37 225 37 1179 37 225 14574 227 14574 133 14574 246 30457 238 14574 242 30457 229 14574 249 14574 134 14574 258 30457 228 14574 258 14574 114 14574 133 14574 232 36628 228 1018 4982 13368 2909 9513 17827 35 37 35 38 35 39 35 11873 47838 20921 16623 13028 8372 1039 9446 40242 13852 2053 8949 12531 1520 10700 5881 9592 13299 914 31753 31359 9163 3202 35472 10397 439 4763 2583 330 102 1455 938 1182 2017 30 330 613 844 3654 49 330 63 646 3654 439 4621 1930 561 30 330 54 844 2124 1629 35993 49 2688 25 7709 312 25 94 62
diff --git a/models/ggml-vocab-starcoder.gguf.inp b/models/ggml-vocab-starcoder.gguf.inp
index 0389f00c7..0a89107c6 100644
--- a/models/ggml-vocab-starcoder.gguf.inp
+++ b/models/ggml-vocab-starcoder.gguf.inp
@@ -1,3 +1,7 @@
+ied 4 ½ months
+__ggml_vocab_test__
+Führer
+__ggml_vocab_test__
__ggml_vocab_test__
diff --git a/models/ggml-vocab-starcoder.gguf.out b/models/ggml-vocab-starcoder.gguf.out
index cd04254af..ccb55c7fe 100644
--- a/models/ggml-vocab-starcoder.gguf.out
+++ b/models/ggml-vocab-starcoder.gguf.out
@@ -1,3 +1,5 @@
+ 4850 244 57 244 162 159 17722
+ 75 2022 3943 284
244
280
diff --git a/pyrightconfig.json b/pyrightconfig.json
new file mode 100644
index 000000000..020a71a4e
--- /dev/null
+++ b/pyrightconfig.json
@@ -0,0 +1,3 @@
+{
+ "extraPaths": ["gguf-py"],
+}
diff --git a/requirements/requirements-convert-hf-to-gguf-update.txt b/requirements/requirements-convert-hf-to-gguf-update.txt
index 6ce840d73..6ac402610 100644
--- a/requirements/requirements-convert-hf-to-gguf-update.txt
+++ b/requirements/requirements-convert-hf-to-gguf-update.txt
@@ -1,3 +1,2 @@
-r ./requirements-convert.txt
torch~=2.1.1
-einops~=0.7.0
diff --git a/requirements/requirements-convert-hf-to-gguf.txt b/requirements/requirements-convert-hf-to-gguf.txt
index 6ce840d73..6ac402610 100644
--- a/requirements/requirements-convert-hf-to-gguf.txt
+++ b/requirements/requirements-convert-hf-to-gguf.txt
@@ -1,3 +1,2 @@
-r ./requirements-convert.txt
torch~=2.1.1
-einops~=0.7.0
diff --git a/requirements/requirements-convert.txt b/requirements/requirements-convert.txt
index a3d6ecec0..7ab1228cb 100644
--- a/requirements/requirements-convert.txt
+++ b/requirements/requirements-convert.txt
@@ -1,5 +1,5 @@
numpy~=1.24.4
-sentencepiece~=0.1.98
-transformers>=4.35.2,<5.0.0
+sentencepiece~=0.2.0
+transformers>=4.40.1,<5.0.0
gguf>=0.1.0
protobuf>=4.21.0,<5.0.0
diff --git a/scripts/compare-llama-bench.py b/scripts/compare-llama-bench.py
index ef7f19ecb..fed3c1ee3 100755
--- a/scripts/compare-llama-bench.py
+++ b/scripts/compare-llama-bench.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import logging
import argparse
import heapq
import sys
@@ -11,9 +12,11 @@ try:
import git
from tabulate import tabulate
except ImportError as e:
- print("ERROR: the following Python libraries are required: GitPython, tabulate.")
+ print("the following Python libraries are required: GitPython, tabulate.") # noqa: NP100
raise e
+logger = logging.getLogger("compare-llama-bench")
+
# Properties by which to differentiate results per commit:
KEY_PROPERTIES = [
"cpu_info", "gpu_info", "n_gpu_layers", "main_gpu", "cuda", "opencl", "metal", "gpu_blas",
@@ -90,12 +93,14 @@ help_s = (
"specified values are averaged WITHOUT weighing by the --repetitions parameter of llama-bench."
)
parser.add_argument("-s", "--show", help=help_s)
+parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
known_args, unknown_args = parser.parse_known_args()
+logging.basicConfig(level=logging.DEBUG if known_args.verbose else logging.INFO)
+
if unknown_args:
- print(f"ERROR: Received unknown args: {unknown_args}.")
- print()
+ logger.error(f"Received unknown args: {unknown_args}.\n")
parser.print_help()
sys.exit(1)
@@ -108,8 +113,7 @@ if input_file is None:
input_file = sqlite_files[0]
if input_file is None:
- print("ERROR: Cannot find a suitable input file, please provide one.")
- print()
+ logger.error("Cannot find a suitable input file, please provide one.\n")
parser.print_help()
sys.exit(1)
@@ -194,23 +198,19 @@ if known_args.baseline is not None:
hexsha8_baseline = get_commit_hexsha8(known_args.baseline)
name_baseline = known_args.baseline
if hexsha8_baseline is None:
- print(f"ERROR: cannot find data for baseline={known_args.baseline}.")
+ logger.error(f"cannot find data for baseline={known_args.baseline}.")
sys.exit(1)
# Otherwise, search for the most recent parent of master for which there is data:
elif repo is not None:
hexsha8_baseline = find_parent_in_data(repo.heads.master.commit)
if hexsha8_baseline is None:
- print("ERROR: No baseline was provided and did not find data for any master branch commits.")
- print()
+ logger.error("No baseline was provided and did not find data for any master branch commits.\n")
parser.print_help()
sys.exit(1)
else:
- print(
- "ERROR: No baseline was provided and the current working directory "
- "is not part of a git repository from which a baseline could be inferred."
- )
- print()
+ logger.error("No baseline was provided and the current working directory "
+ "is not part of a git repository from which a baseline could be inferred.\n")
parser.print_help()
sys.exit(1)
@@ -227,7 +227,7 @@ if known_args.compare is not None:
hexsha8_compare = get_commit_hexsha8(known_args.compare)
name_compare = known_args.compare
if hexsha8_compare is None:
- print(f"ERROR: cannot find data for compare={known_args.compare}.")
+ logger.error(f"cannot find data for compare={known_args.compare}.")
sys.exit(1)
# Otherwise, search for the commit for llama-bench was most recently run
# and that is not a parent of master:
@@ -241,16 +241,12 @@ elif repo is not None:
break
if hexsha8_compare is None:
- print("ERROR: No compare target was provided and did not find data for any non-master commits.")
- print()
+ logger.error("No compare target was provided and did not find data for any non-master commits.\n")
parser.print_help()
sys.exit(1)
else:
- print(
- "ERROR: No compare target was provided and the current working directory "
- "is not part of a git repository from which a compare target could be inferred."
- )
- print()
+ logger.error("No compare target was provided and the current working directory "
+ "is not part of a git repository from which a compare target could be inferred.\n")
parser.print_help()
sys.exit(1)
@@ -284,8 +280,7 @@ if known_args.show is not None:
if prop not in KEY_PROPERTIES[:-2]: # Last two values are n_prompt, n_gen.
unknown_cols.append(prop)
if unknown_cols:
- print(f"ERROR: Unknown values for --show: {', '.join(unknown_cols)}")
- print()
+ logger.error(f"Unknown values for --show: {', '.join(unknown_cols)}")
parser.print_usage()
sys.exit(1)
rows_show = get_rows(show)
@@ -369,7 +364,7 @@ if "gpu_info" in show:
headers = [PRETTY_NAMES[p] for p in show]
headers += ["Test", f"t/s {name_baseline}", f"t/s {name_compare}", "Speedup"]
-print(tabulate(
+print(tabulate( # noqa: NP100
table,
headers=headers,
floatfmt=".2f",
diff --git a/scripts/gen-unicode-data.py b/scripts/gen-unicode-data.py
new file mode 100644
index 000000000..37d1e396c
--- /dev/null
+++ b/scripts/gen-unicode-data.py
@@ -0,0 +1,64 @@
+import regex
+
+
+def get_matches(regex_expr):
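+ # Walk every Unicode codepoint (0 .. 0x10FFFF) and coalesce consecutive
+ # matches of regex_expr into inclusive (first, last) ranges.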
+ regex_expr_compiled = regex.compile(regex_expr)
+ unicode_ranges = []
+ current_range = None
+
+ for codepoint in range(0x110000):
+ char = chr(codepoint)
+ if regex_expr_compiled.match(char):
+ if current_range is None:
+ current_range = [codepoint, codepoint]
+ else:
+ current_range[1] = codepoint
+ elif current_range is not None:
+ unicode_ranges.append(tuple(current_range))
+ current_range = None
+
+ if current_range is not None:
+ unicode_ranges.append(tuple(current_range))
+
+ return unicode_ranges
+
+
+def print_cat(mode, cat, ranges):
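+ # Emit the collected ranges (or map pairs) as a C++ initializer list,
+ # four entries per line, matching the layout used in unicode-data.cpp.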
+ if mode == "range":
+ print("const std::vector> unicode_ranges_{} = {{".format(cat)) # noqa: NP100
+ if mode == "map":
+ print("const std::map unicode_map_{} = {{".format(cat)) # noqa: NP100
+ for i, values in enumerate(ranges):
+ end = ",\n" if (i % 4 == 3 or i + 1 == len(ranges)) else ", "
+ values = ["0x%08X" % value for value in values]
+ print("{" + ", ".join(values) + "}", end=end) # noqa: NP100
+ print("};") # noqa: NP100
+ print("") # noqa: NP100
+
+
+print_cat("range", "number", get_matches(r'\p{N}'))
+print_cat("range", "letter", get_matches(r'\p{L}'))
+print_cat("range", "separator", get_matches(r'\p{Z}'))
+print_cat("range", "accent_mark", get_matches(r'\p{M}'))
+print_cat("range", "punctuation", get_matches(r'\p{P}'))
+print_cat("range", "symbol", get_matches(r'\p{S}'))
+print_cat("range", "control", get_matches(r'\p{C}'))
+
+print_cat("range", "whitespace", get_matches(r'\s'))
+
+
+map_lowercase = []
+map_uppercase = []
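+# Collect every codepoint whose str.lower()/str.upper() form differs from itself.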
+for codepoint in range(0x110000):
+ char = chr(codepoint)
+ lower = ord(char.lower()[0])
+ upper = ord(char.upper()[0])
+ if codepoint != lower:
+ map_lowercase.append((codepoint, lower))
+ if codepoint != upper:
+ map_uppercase.append((codepoint, upper))
+print_cat("map", "lowercase", map_lowercase)
+print_cat("map", "uppercase", map_uppercase)
+
+
+# TODO: generate unicode_map_nfd
diff --git a/scripts/run-with-preset.py b/scripts/run-with-preset.py
index a18252730..e986a3604 100755
--- a/scripts/run-with-preset.py
+++ b/scripts/run-with-preset.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import logging
import argparse
import os
import subprocess
@@ -7,6 +8,8 @@ import sys
import yaml
+logger = logging.getLogger("run-with-preset")
+
CLI_ARGS_MAIN_PERPLEXITY = [
"batch-size", "cfg-negative-prompt", "cfg-scale", "chunks", "color", "ctx-size", "escape",
"export", "file", "frequency-penalty", "grammar", "grammar-file", "hellaswag",
@@ -56,6 +59,7 @@ parser.add_argument("-bin", "--binary", help="The binary to run.")
parser.add_argument("yaml_files", nargs="*",
help="Arbitrary number of YAML files from which to read preset values. "
"If two files specify the same values the later one will be used.")
+parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
known_args, unknown_args = parser.parse_known_args()
@@ -63,6 +67,8 @@ if not known_args.yaml_files and not unknown_args:
parser.print_help()
sys.exit(0)
+logging.basicConfig(level=logging.DEBUG if known_args.verbose else logging.INFO)
+
props = dict()
for yaml_file in known_args.yaml_files:
@@ -85,7 +91,7 @@ elif binary.lower().endswith("llama-bench"):
elif binary.lower().endswith("server"):
cli_args = CLI_ARGS_SERVER
else:
- print(f"Unknown binary: {binary}")
+ logger.error(f"Unknown binary: {binary}")
sys.exit(1)
command_list = [binary]
@@ -121,11 +127,11 @@ for cli_arg in cli_args:
num_unused = len(props)
if num_unused > 10:
- print(f"The preset file contained a total of {num_unused} unused properties.")
+ logger.info(f"The preset file contained a total of {num_unused} unused properties.")
elif num_unused > 0:
- print("The preset file contained the following unused properties:")
+ logger.info("The preset file contained the following unused properties:")
for prop, value in props.items():
- print(f" {prop}: {value}")
+ logger.info(f" {prop}: {value}")
command_list += unknown_args
diff --git a/scripts/verify-checksum-models.py b/scripts/verify-checksum-models.py
index dff4b4734..0b5b9aafa 100755
--- a/scripts/verify-checksum-models.py
+++ b/scripts/verify-checksum-models.py
@@ -1,8 +1,11 @@
#!/usr/bin/env python3
+import logging
import os
import hashlib
+logger = logging.getLogger("verify-checksum-models")
+
def sha256sum(file):
block_size = 16 * 1024 * 1024 # 16 MB block size
@@ -27,7 +30,7 @@ hash_list_file = os.path.join(llama_path, "SHA256SUMS")
# Check if the hash list file exists
if not os.path.exists(hash_list_file):
- print(f"Hash list file not found: {hash_list_file}")
+ logger.error(f"Hash list file not found: {hash_list_file}")
exit(1)
# Read the hash file content and split it into an array of lines
@@ -46,7 +49,7 @@ for line in hash_list:
file_path = os.path.join(llama_path, filename)
# Informing user of the progress of the integrity check
- print(f"Verifying the checksum of {file_path}")
+ logger.info(f"Verifying the checksum of {file_path}")
# Check if the file exists
if os.path.exists(file_path):
@@ -73,9 +76,9 @@ for line in hash_list:
# Print column headers for results table
-print("\n" + "filename".ljust(40) + "valid checksum".center(20) + "file missing".center(20))
-print("-" * 80)
+print("filename".ljust(40) + "valid checksum".center(20) + "file missing".center(20)) # noqa: NP100
+print("-" * 80) # noqa: NP100
# Output the results as a table
for r in results:
- print(f"{r['filename']:40} {r['valid checksum']:^20} {r['file missing']:^20}")
+ print(f"{r['filename']:40} {r['valid checksum']:^20} {r['file missing']:^20}") # noqa: NP100
diff --git a/sgemm.cpp b/sgemm.cpp
index 4e0159804..40ba9d7e9 100644
--- a/sgemm.cpp
+++ b/sgemm.cpp
@@ -1,6 +1,3 @@
-// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
-// vi: set et ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi
-//
// Copyright 2024 Mozilla Foundation
//
// Permission is hereby granted, free of charge, to any person obtaining
@@ -585,15 +582,15 @@ class tinyBLAS_Q0_ARM {
};
#endif // __ARM_FEATURE_DOTPROD
-#if defined(__AVX2__) || defined(__AVX512F__)
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
template <typename TA, typename TB, typename TC>
-class tinyBLAS_Q0_AVX2 {
+class tinyBLAS_Q0_AVX {
public:
- tinyBLAS_Q0_AVX2(int64_t k,
- const TA *A, int64_t lda,
- const TB *B, int64_t ldb,
- TC *C, int64_t ldc,
- int ith, int nth)
+ tinyBLAS_Q0_AVX(int64_t k,
+ const TA *A, int64_t lda,
+ const TB *B, int64_t ldb,
+ TC *C, int64_t ldc,
+ int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
}
@@ -728,14 +725,34 @@ class tinyBLAS_Q0_AVX2 {
__m256 Cv[RN][RM] = {};
for (int64_t l = 0; l < k; ++l)
for (int64_t j = 0; j < RN; ++j)
- for (int64_t i = 0; i < RM; ++i)
+ for (int64_t i = 0; i < RM; ++i) {
+#if defined(__AVX2__)
+ __m256 udTmp = updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
+ load(A + lda * (ii + i) + l)),
+ _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
+ load(A + lda * (ii + i) + l)));
+#else
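+ // Plain AVX lacks 256-bit integer ops: compute updot on two 128-bit
+ // halves (maddubs + madd) and pack the two results into one __m256.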
+ __m128i ali0 = load0(A + lda * (ii + i) + l);
+ __m128i ali1 = load1(A + lda * (ii + i) + l);
+ __m128i blj0 = load0(B + ldb * (jj + j) + l);
+ __m128i blj1 = load1(B + ldb * (jj + j) + l);
+
+ __m128i sepAA0 = _mm_sign_epi8(ali0, ali0);
+ __m128i sepAA1 = _mm_sign_epi8(ali1, ali1);
+ __m128i sepBA0 = _mm_sign_epi8(blj0, ali0);
+ __m128i sepBA1 = _mm_sign_epi8(blj1, ali1);
+
+ // updot
+ const __m128i oneFill = _mm_set1_epi16(1);
+ __m128i mad0 = _mm_maddubs_epi16(sepAA0, sepBA0);
+ __m128i mad1 = _mm_maddubs_epi16(sepAA1, sepBA1);
+ __m256 udTmp = _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_madd_epi16(oneFill, mad1), _mm_madd_epi16(oneFill, mad0)));
+#endif
Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) *
unhalf(B[ldb * (jj + j) + l].d)),
- updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
- load(A + lda * (ii + i) + l)),
- _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
- load(A + lda * (ii + i) + l))),
- Cv[j][i]);
+ udTmp,
+ Cv[j][i]);
+ }
for (int64_t j = 0; j < RN; ++j)
for (int64_t i = 0; i < RM; ++i)
C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
@@ -746,10 +763,28 @@ class tinyBLAS_Q0_AVX2 {
return _mm256_loadu_si256((const __m256i *)b->qs);
}
+ inline __m128i load0(const block_q8_0 *b) {
+ return _mm_loadu_si128((const __m128i *)b->qs);
+ }
+
+ inline __m128i load1(const block_q8_0 *b) {
+ return _mm_loadu_si128(((const __m128i *)b->qs) + 1);
+ }
+
inline __m256i load(const block_q4_0 *b) {
return _mm256_sub_epi8(denibble(b->qs), _mm256_set1_epi8(8));
}
+ inline __m128i load0(const block_q4_0 *b) {
+ const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+ return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), x), _mm_set1_epi8(8));
+ }
+
+ inline __m128i load1(const block_q4_0 *b) {
+ const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+ return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4)), _mm_set1_epi8(8));
+ }
+
inline __m256 updot(__m256i u, __m256i s) {
__m256i res;
#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__))
@@ -777,7 +812,7 @@ class tinyBLAS_Q0_AVX2 {
const int ith;
const int nth;
};
-#endif // __AVX2__
+#endif // __AVX__
} // namespace
@@ -928,8 +963,8 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
case GGML_TYPE_Q8_0: {
if (Btype != GGML_TYPE_Q8_0)
return false;
-#if defined(__AVX2__) || defined(__AVX512F__)
- tinyBLAS_Q0_AVX2 tb{
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+ tinyBLAS_Q0_AVX tb{
k, (const block_q8_0 *)A, lda,
(const block_q8_0 *)B, ldb,
(float *)C, ldc,
@@ -952,8 +987,8 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
case GGML_TYPE_Q4_0: {
if (Btype != GGML_TYPE_Q8_0)
return false;
-#if defined(__AVX2__) || defined(__AVX512F__)
- tinyBLAS_Q0_AVX2 tb{
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+ tinyBLAS_Q0_AVX tb{
k, (const block_q4_0 *)A, lda,
(const block_q8_0 *)B, ldb,
(float *)C, ldc,
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index d23e7f771..d409a1d6b 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -74,13 +74,17 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${CMAKE
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-llm.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-coder.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
# TODO: enable when fixed
+# https://github.com/ggerganov/llama.cpp/pull/7036
#llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
+#llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-llm.gguf)
+#llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-coder.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-qwen2.gguf)
# build test-tokenizer-1-bpe target once and add many tests
add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp)
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index b27c1291e..0d66de5d9 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -50,7 +50,7 @@ static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float m
if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
ggml_backend_tensor_set(tensor, data.data(), 0, size * sizeof(float));
- } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16) {
+ } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
GGML_ASSERT(size % ggml_blck_size(tensor->type) == 0);
std::vector<uint8_t> dataq(ggml_row_size(tensor->type, size));
std::vector<float> imatrix(tensor->ne[0], 1.0f); // dummy importance matrix
@@ -92,6 +92,8 @@ static std::vector<float> tensor_to_float(const ggml_tensor * t) {
size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
if (t->type == GGML_TYPE_F16) {
tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
+ } else if (t->type == GGML_TYPE_BF16) {
+ tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
} else if (t->type == GGML_TYPE_F32) {
tv.push_back(*(float *) &buf[i]);
} else if (t->type == GGML_TYPE_I32) {
@@ -1898,7 +1900,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
std::default_random_engine rng(0);
const ggml_type all_types[] = {
- GGML_TYPE_F32, GGML_TYPE_F16,
+ GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_BF16,
GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
GGML_TYPE_Q8_0,
@@ -2173,7 +2175,11 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
test_cases.emplace_back(new test_timestep_embedding());
test_cases.emplace_back(new test_leaky_relu());
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ for (int hs : { 64, 128, }) { // other head sizes not implemented
+#else
for (int hs : { 64, 80, 128, 256, }) {
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
for (int nh : { 32, }) {
for (int kv : { 512, 1024, }) {
for (int nb : { 1, 2, 4, 8, }) {
diff --git a/tests/test-json-schema-to-grammar.cpp b/tests/test-json-schema-to-grammar.cpp
index b2ce4d260..c5361b5b8 100755
--- a/tests/test-json-schema-to-grammar.cpp
+++ b/tests/test-json-schema-to-grammar.cpp
@@ -2,6 +2,7 @@
#undef NDEBUG
#endif
+#include <fstream>
#include
#include
#include
diff --git a/tests/test-tokenizer-0-bpe.py b/tests/test-tokenizer-0-bpe.py
deleted file mode 100644
index 33a272441..000000000
--- a/tests/test-tokenizer-0-bpe.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# tests with BPE tokenizer
-#
-# sample usage:
-#
-# python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/Meta-Llama-3-8B-Instruct/
-# python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/falcon-7b/
-# python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/deepseek-coder-6.7b-instruct/
-#
-
-import argparse
-
-from transformers import AutoTokenizer
-
-parser = argparse.ArgumentParser()
-parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
-parser.add_argument("--fname-tok", help="path to a text file to tokenize")
-args = parser.parse_args()
-
-dir_tokenizer = args.dir_tokenizer
-
-tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
-
-tests = [
- "",
- " ",
- " ",
- " ",
- "\t",
- "\n",
- "\n\n",
- "\n\n\n",
- "\t\n",
- "Hello world",
- " Hello world",
- "Hello World",
- " Hello World",
- " Hello World!",
- "Hello, world!",
- " Hello, world!",
- " this is 🦙.cpp",
- "w048 7tuijk dsdfhu",
- "нещо на Български",
- "កាន់តែពិសេសអាចខលចេញ",
- "🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
- "Hello",
- " Hello",
- " Hello",
- " Hello",
- " Hello",
- " Hello\n Hello",
- " (",
- "\n =",
- "' era",
- "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~",
- "3",
- "33",
- "333",
- "3333",
- "33333",
- "333333",
- "3333333",
- "33333333",
- "333333333",
-]
-
-for text in tests:
- print('text: ', text)
- print(tokenizer.encode(text))
- print(tokenizer.decode(tokenizer.encode(text)))
-
-print("\n\ntests for C++:\n")
-for text in tests:
- res = tokenizer.encode(text)
-
- k = text.replace('\n', '\\n')
- k = k.replace('\t', '\\t')
- k = '"' + k + '"'
- print("{ %-24s, { " % k, end='')
- for x in res:
- print("%7d," % x, end='')
- print(" }, },")
-
-print(tokenizer.encode('hello'))
-print(tokenizer.encode('world'))
-print(tokenizer.encode(' world'))
-print(tokenizer.encode('hello world'))
-
-fname_tok = args.fname_tok
-if fname_tok:
- print('tokenizing file: ', fname_tok)
- fname_out = fname_tok + '.tok'
- with open(fname_tok, 'r', encoding='utf-8') as f:
- lines = f.readlines()
- s = ''.join(lines)
- res = tokenizer.encode(s)
- # write to file
- with open(fname_out, 'w', encoding='utf-8') as f:
- for x in res:
- # LLaMA v3 for some reason strips the space for these tokens (and others)
- # if x == 662:
- # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
- # elif x == 1174:
- # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
- # elif x == 2564:
- # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
- # elif x == 758:
- # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
- # elif x == 949:
- # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
- # elif x == 5354:
- # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
- # else:
- # f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
- f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
- print('len(res): ', len(res))
- print('len(lines): ', len(lines))
- print('results written to: ', fname_out)
diff --git a/tests/test-tokenizer-0-spm.py b/tests/test-tokenizer-0-spm.py
deleted file mode 100644
index be12a6b93..000000000
--- a/tests/test-tokenizer-0-spm.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# tests with SPM tokenizer
-#
-# sample usage:
-#
-# python3 tests/test-tokenizer-0-spm.py ~/Data/huggingface/Llama-2-7b-hf/
-# python3 tests/test-tokenizer-0-spm.py ~/Data/huggingface/CodeLlama-34b-Instruct-hf/
-#
-
-
-import argparse
-
-from sentencepiece import SentencePieceProcessor
-
-parser = argparse.ArgumentParser()
-parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
-parser.add_argument("--fname-tok", help="path to a text file to tokenize")
-args = parser.parse_args()
-
-dir_tokenizer = args.dir_tokenizer
-
-tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')
-
-tests = [
- "",
- " ",
- " ",
- " ",
- "\t",
- "\n",
- "\n\n",
- "\n\n\n",
- "\t\n",
- "Hello world",
- " Hello world",
- "Hello World",
- " Hello World",
- " Hello World!",
- "Hello, world!",
- " Hello, world!",
- " this is 🦙.cpp",
- "w048 7tuijk dsdfhu",
- "нещо на Български",
- "កាន់តែពិសេសអាចខលចេញ",
- "🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
- "Hello",
- " Hello",
- " Hello",
- " Hello",
- " Hello",
- " Hello\n Hello",
- " (",
- "\n =",
- "' era",
- "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~",
- "3",
- "33",
- "333",
- "3333",
- "33333",
- "333333",
- "3333333",
- "33333333",
- "333333333",
-]
-
-
-for text in tests:
- print('text: ', text)
- print('\nwith bos:')
- print(tokenizer.encode(text, add_bos=True))
- print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
- print('\nwithout bos:')
- print(tokenizer.encode(text, add_bos=False))
- print(tokenizer.decode(tokenizer.encode(text, add_bos=False)))
-
-print("'" + tokenizer.id_to_piece(15043) + "'") # '_Hello'
-print("'" + tokenizer.id_to_piece(29871) + "'") # '_'
-print("'" + tokenizer.decode([15043]) + "'") # 'Hello'
-print("'" + tokenizer.decode([15043, 15043]) + "'") # 'Hello Hello'
-print("'" + tokenizer.decode([29871, 15043]) + "'") # ' Hello'
-print("'" + tokenizer.decode([29871, 15043, 29871, 15043]) + "'") # ' Hello Hello'
-
-print("\n\ntests for C++:\n")
-for text in tests:
- res = tokenizer.encode(text, add_bos=False)
-
- k = text.replace('\n', '\\n')
- k = k.replace('\t', '\\t')
- k = '"' + k + '"'
- print("{ %-24s, { " % k, end='')
- for x in res:
- print("%7d," % x, end='')
- print(" }, },")
-
-print(tokenizer.encode('hello'))
-print(tokenizer.encode('world'))
-print(tokenizer.encode(' world'))
-print(tokenizer.encode('hello world'))
-
-fname_tok = args.fname_tok
-if fname_tok:
- print('tokenizing file: ', fname_tok)
- fname_out = fname_tok + '.tok'
- with open(fname_tok, 'r', encoding='utf-8') as f:
- lines = f.readlines()
- s = ''.join(lines)
- res = tokenizer.encode(s, add_bos=True)
- # write to file
- with open(fname_out, 'w', encoding='utf-8') as f:
- for x in res:
- f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
- print('len(res): ', len(res))
- print('len(lines): ', len(lines))
- print('results written to: ', fname_out)
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 5122757cf..d478f1041 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -55,8 +55,10 @@
// return _k_tests;
//}
-static std::map<std::string, std::vector<llama_token>> read_tests(const std::string & fname_inp, const std::string & fname_out) {
- std::map<std::string, std::vector<llama_token>> tests;
+using llama_tests = std::map<std::string, std::vector<llama_token>>;
+
+static llama_tests read_tests(const std::string & fname_inp, const std::string & fname_out) {
+ llama_tests tests;
std::ifstream ifs_inp(fname_inp);
if (!ifs_inp) {
@@ -175,12 +177,20 @@ int main(int argc, char **argv) {
bool success = true;
- const auto k_tests = read_tests(fname_inp, fname_out);
+ const auto k_tests = [&]() -> llama_tests {
+ if (!fname_text.empty()) {
+ return {};
+ }
- if (k_tests.empty()) {
- fprintf(stderr, "%s : error: no tests found\n", __func__);
- return 1;
- }
+ const auto res = read_tests(fname_inp, fname_out);
+
+ if (res.empty()) {
+ fprintf(stderr, "%s : error: no tests found\n", __func__);
+ exit(1);
+ }
+
+ return res;
+ }();
const bool add_special = false;
@@ -238,7 +248,17 @@ int main(int argc, char **argv) {
fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());
- const std::vector<llama_token> res = llama_tokenize(ctx, text, add_special);
+ std::vector<llama_token> res;
+
+ {
+ const auto t_start = ggml_time_us();
+
+ res = llama_tokenize(ctx, text, add_special);
+
+ const auto t_end = ggml_time_us();
+
+ fprintf(stderr, "%s : tokenized in %.3f ms (cpp)\n", __func__, (t_end - t_start) / 1000.0);
+ }
fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
@@ -252,7 +272,8 @@ int main(int argc, char **argv) {
}
for (const auto & tok : res) {
- ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector{tok})) << "'" << std::endl;
+ //ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector{tok})) << "'" << std::endl;
+ ofs << tok << "\n";
}
}
diff --git a/tests/test-tokenizer-0.py b/tests/test-tokenizer-0.py
new file mode 100644
index 000000000..cd760d1ce
--- /dev/null
+++ b/tests/test-tokenizer-0.py
@@ -0,0 +1,46 @@
+import time
+import argparse
+
+from transformers import AutoTokenizer
+
+parser = argparse.ArgumentParser()
+parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
+parser.add_argument("--fname-tok", help="path to a text file to tokenize", required=True)
+args = parser.parse_args()
+
+dir_tokenizer = args.dir_tokenizer
+fname_tok = args.fname_tok
+
+tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
+
+print('tokenizing file: ', fname_tok) # noqa: NP100
+fname_out = fname_tok + '.tok'
+with open(fname_tok, 'r', encoding='utf-8') as f:
+ lines = f.readlines()
+ s = ''.join(lines)
+ t_start = time.time()
+ res = tokenizer.encode(s, add_special_tokens=False)
+ t_end = time.time()
+ print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)') # noqa: NP100
+ with open(fname_out, 'w', encoding='utf-8') as f:
+ for x in res:
+ # LLaMA v3 for some reason strips the space for these tokens (and others)
+ # if x == 662:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 1174:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 2564:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 758:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 949:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 5354:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # else:
+ # f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
+ # f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
+ f.write(str(x) + '\n')
+ print('len(res): ', len(res)) # noqa: NP100
+ print('len(lines): ', len(lines)) # noqa: NP100
+print('results written to: ', fname_out) # noqa: NP100
diff --git a/tests/test-tokenizer-0.sh b/tests/test-tokenizer-0.sh
new file mode 100755
index 000000000..2fb8632d8
--- /dev/null
+++ b/tests/test-tokenizer-0.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Usage:
+#
+# test-tokenizer-0.sh <name> <input>
+#
+
+if [ $# -ne 2 ]; then
+ printf "Usage: $0 \n"
+ exit 1
+fi
+
+name=$1
+input=$2
+
+make -j tests/test-tokenizer-0
+
+printf "Testing %s on %s ...\n" $name $input
+
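+# Tokenize with the HF reference (Python) and with llama.cpp, then diff the dumps.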
+python3 ./tests/test-tokenizer-0.py ./models/tokenizers/$name --fname-tok $input > /tmp/test-tokenizer-0-$name-py.log 2>&1
+cat /tmp/test-tokenizer-0-$name-py.log | grep "tokenized in"
+
+./tests/test-tokenizer-0 ./models/ggml-vocab-$name.gguf $input > /tmp/test-tokenizer-0-$name-cpp.log 2>&1
+cat /tmp/test-tokenizer-0-$name-cpp.log | grep "tokenized in"
+
+diff $input.tok $input.tokcpp > /dev/null 2>&1
+
+if [ $? -eq 0 ]; then
+ printf "Tokenization is correct!\n"
+else
+ diff $input.tok $input.tokcpp | head -n 32
+
+ printf "Tokenization differs!\n"
+fi
diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
new file mode 100644
index 000000000..5b2ab8ef7
--- /dev/null
+++ b/tests/test-tokenizer-random.py
@@ -0,0 +1,295 @@
+# Test libllama tokenizer == AutoTokenizer.
+# Brute force random tokens/text generation.
+#
+# Sample usage:
+#
+# python3 tests/test-tokenizer-random.py ./models/ggml-vocab-llama-bpe.gguf ./models/tokenizers/llama-bpe
+#
+
+import time
+import logging
+import argparse
+import subprocess
+import random
+
+from typing import Iterator
+
+import cffi
+from transformers import AutoTokenizer, PreTrainedTokenizerBase
+
+logger = logging.getLogger("test-tokenizer-random-bpe")
+
+
+class LibLlama:
+
+ DEFAULT_PATH_LLAMA_H = "./llama.h"
+ DEFAULT_PATH_LIBLLAMA = "./build/libllama.so" # CMakeLists.txt: BUILD_SHARED_LIBS ON
+
+ def __init__(self, path_llama_h: str = None, path_libllama: str = None):
+ path_llama_h = path_llama_h or self.DEFAULT_PATH_LLAMA_H
+ path_libllama = path_libllama or self.DEFAULT_PATH_LIBLLAMA
+ (self.ffi, self.lib) = self._load_libllama_cffi(path_llama_h, path_libllama)
+ self.lib.llama_backend_init()
+
+ def _load_libllama_cffi(self, path_llama_h: str, path_libllama: str):
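+ # Preprocess llama.h with gcc -E so that cffi/pycparser can parse the
+ # declarations, then dlopen the shared libllama built with BUILD_SHARED_LIBS=ON.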
+ cmd = ["gcc", "-E", "-P", "-D__restrict=", "-D__attribute__(x)=", "-D__asm__(x)=", path_llama_h]
+ res = subprocess.run(cmd, stdout=subprocess.PIPE)
+ assert (res.returncode == 0)
+ source = res.stdout.decode()
+ ffi = cffi.FFI()
+ if True: # workarounds for pycparser
+ source = "typedef struct { } __builtin_va_list;" + "\n" + source
+ source = source.replace("sizeof (int)", str(ffi.sizeof("int")))
+ source = source.replace("sizeof (void *)", str(ffi.sizeof("void*")))
+ source = source.replace("sizeof (size_t)", str(ffi.sizeof("size_t")))
+ source = source.replace("sizeof(int32_t)", str(ffi.sizeof("int32_t")))
+ ffi.cdef(source, override=True)
+ lib = ffi.dlopen(path_libllama)
+ return (ffi, lib)
+
+ def model_default_params(self, **kwargs):
+ mparams = self.lib.llama_model_default_params()
+ for k, v in kwargs.items():
+ setattr(mparams, k, v)
+ return mparams
+
+ def context_default_params(self, **kwargs):
+ cparams = self.lib.llama_context_default_params()
+ for k, v in kwargs.items():
+ setattr(cparams, k, v)
+ return cparams
+
+
+class LibLlamaModel:
+
+ def __init__(self, libllama: LibLlama, path_model: str, mparams={}, cparams={}):
+ self.lib = libllama.lib
+ self.ffi = libllama.ffi
+ if isinstance(mparams, dict):
+ mparams = libllama.model_default_params(**mparams)
+ self.model = self.lib.llama_load_model_from_file(path_model.encode(), mparams)
+ if not self.model:
+ raise RuntimeError("error: failed to load model '%s'" % path_model)
+ if isinstance(cparams, dict):
+ cparams = libllama.context_default_params(**cparams)
+ self.ctx = self.lib.llama_new_context_with_model(self.model, cparams)
+ if not self.ctx:
+ raise RuntimeError("error: failed to create context for model '%s'" % path_model)
+ n_tokens_max = self.lib.llama_n_ctx(self.ctx)
+ self.token_ids = self.ffi.new("llama_token[]", n_tokens_max)
+
+ def free(self):
+ if self.ctx:
+ self.lib.llama_free(self.ctx)
+ if self.model:
+ self.lib.llama_free_model(self.model)
+ self.ctx = None
+ self.model = None
+ self.lib = None
+
+ def tokenize(self, text: str, n_tokens_max: int = 0, add_special: bool = False, parse_special: bool = False) -> list[int]:
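+ # Thin wrapper over llama_tokenize(); a negative return value means the
+ # output buffer was too small, which is reported here as an empty token list.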
+ n_tokens_max = n_tokens_max if n_tokens_max > 0 else len(self.token_ids)
+ text = text.encode("utf-8")
+ num = self.lib.llama_tokenize(self.model, text, len(text), self.token_ids, n_tokens_max, add_special, parse_special)
+ if num < 0:
+ return []
+ return list(self.token_ids[0:num])
+
+
+def generator_custom_text() -> Iterator[str]:
+ """General tests"""
+ yield from [
+ "",
+ " ",
+ " ",
+ " ",
+ "\t",
+ "\n",
+ "\n\n",
+ "\n\n\n",
+ "\t\n",
+ "Hello world",
+ " Hello world",
+ "Hello World",
+ " Hello World",
+ " Hello World!",
+ "Hello, world!",
+ " Hello, world!",
+ " this is 🦙.cpp",
+ "w048 7tuijk dsdfhu",
+ "нещо на Български",
+ "កាន់តែពិសេសអាចខលចេញ",
+ "🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
+ "Hello",
+ " Hello",
+ " Hello",
+ " Hello",
+ " Hello",
+ " Hello\n Hello",
+ " (",
+ "\n =",
+ "' era",
+ "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~",
+ "3",
+ "33",
+ "333",
+ "3333",
+ "33333",
+ "333333",
+ "3333333",
+ "33333333",
+ "333333333",
+ ]
+
+
+def generator_custom_text_edge_cases() -> Iterator[str]:
+ """Edge cases found while debugging"""
+ yield from [
+ '\x1f-a', # unicode_ranges_control, {0x00001C, 0x00001F}
+ '¼-a', # unicode_ranges_digit, 0x00BC
+ '½-a', # unicode_ranges_digit, 0x00BD
+ '¾-a', # unicode_ranges_digit, 0x00BE
+ 'a 〇b', # unicode_ranges_digit, 0x3007
+ 'Ⅵ-a', # unicode_ranges_digit, {0x00002150, 0x0000218F} // Number Forms
+ '\uFEFF//', # unicode_ranges_control, 0xFEFF (BOM)
+ 'a' # TODO: Phi-3 fail
+ ]
+
+
+def generator_random_chars(iterations = 100) -> Iterator[str]:
+ """Brute force random text with simple characters"""
+
+ WHITESPACES = list(" " * 20 + "\n" * 5 + "\r\n" * 5 + "\t" * 5)
+ CHARS = list(set("""
+ ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ abcdefghijklmnopqrstuvwxyz
+ ÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛÄËÏÖÜ
+ áéíóúàèìòùâêîôûäëïöü
+ .-,*/-+ª!"·$%&/()=?¿[]{}<>\\|@#~½¬~;:_
+ """))
+
+ rand = random.Random()
+ for m in range(iterations):
+ rand.seed(m)
+ text = []
+ num_words = rand.randint(300, 400)
+ for i in range(num_words):
+ k = rand.randint(1, 7)
+ word = rand.choices(CHARS, k=k)
+ space = rand.choice(WHITESPACES)
+ text.append("".join(word) + space)
+ yield "".join(text)
+
+
+def generator_random_vocab_chars(tokenizer: PreTrainedTokenizerBase, iterations = 100) -> Iterator[str]:
+ """Brute force random text with vocab characters"""
+
+ vocab_ids = list(tokenizer.vocab.values())
+ vocab_text = tokenizer.decode(vocab_ids, skip_special_tokens=True)
+ vocab_chars = list(set(vocab_text))
+ del vocab_ids, vocab_text
+
+ rand = random.Random()
+ for m in range(iterations):
+ rand.seed(m)
+ text = rand.choices(vocab_chars, k=1024)
+ yield "".join(text)
+
+
+def generator_random_vocab_tokens(tokenizer: PreTrainedTokenizerBase, iterations = 100) -> Iterator[str]:
+ """Brute force random text from vocab tokens"""
+
+ space_id = tokenizer.encode(" ", add_special_tokens=False)[0]
+ vocab_ids = list(tokenizer.vocab.values())
+ vocab_ids = list(sorted(vocab_ids + vocab_ids))
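+ # Replace every second copy with the space token so that decoding yields
+ # the detokenized pieces separated by spaces, ready for the split below.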
+ for i in range(1, len(vocab_ids), 2):
+ vocab_ids[i] = space_id
+ vocab_tokens = tokenizer.decode(vocab_ids, skip_special_tokens=True)
+ vocab_tokens = vocab_tokens.split(" ")
+ del vocab_ids
+
+ yield from vocab_tokens
+
+ rand = random.Random()
+ for m in range(iterations):
+ rand.seed(m)
+ text = []
+ num_words = rand.randint(300, 400)
+ for i in range(num_words):
+ k = rand.randint(1, 3)
+ tokens = rand.choices(vocab_tokens, k=k)
+ tokens = [t.strip(" \n\r\t") for t in tokens]
+ sep = rand.choice(" \n\r\t")
+ text.append("".join(tokens) + sep)
+ yield "".join(text)
+
+
+def generator_random_bytes(iterations = 100) -> Iterator[str]:
+ """Brute force random bytes"""
+
+ WHITESPACES = list(" " * 20 + "\n" * 5 + "\r\n" * 5 + "\t" * 5)
+
+ rand = random.Random()
+ for m in range(iterations):
+ rand.seed(m)
+ text = []
+ num_words = rand.randint(300, 400)
+ for i in range(num_words):
+ k = rand.randint(1, 8)
+ word = [chr(r) for r in rand.randbytes(k) if r]
+ word.append(rand.choice(WHITESPACES))
+ text.append("".join(word))
+ yield "".join(text)
+
+
+def test_compare_tokenizer(model: LibLlamaModel, tokenizer: PreTrainedTokenizerBase, generator: Iterator[str]):
+
+ def find_first_mismatch(ids1: list[int], ids2: list[int]):
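+ # Index of the first differing token; -1 if both sequences are identical.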
+ for i, (a,b) in enumerate(zip(ids1, ids2)):
+ if a != b:
+ return i
+ if len(ids1) == len(ids2):
+ return -1
+ return min(len(ids1), len(ids2))
+
+ t0 = time.perf_counter()
+ logger.info("%s: %s" % (generator.__name__, "ini"))
+ for text in generator:
+ ids1 = model.tokenize(text, add_special=False, parse_special=False)
+ ids2 = tokenizer.encode(text, add_special_tokens=False)
+ if ids1 != ids2:
+ i = find_first_mismatch(ids1, ids2)
+ ids1 = list(ids1)[max(0, i - 2) : i + 2 + 1]
+ ids2 = list(ids2)[max(0, i - 2) : i + 2 + 1]
+ text2 = tokenizer.decode(ids2, skip_special_tokens=True)
+ assert (text2 in text)
+ logger.info(" Text: " + repr(text2))
+ logger.info(" TokenIDs: " + str(ids1))
+ logger.info(" Expected: " + str(ids2))
+ raise Exception()
+ t1 = time.perf_counter()
+ logger.info("%s: end, time: %.3f secs" % (generator.__name__, t1 - t0))
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("vocab_file", help="path to vocab 'gguf' file")
+ parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+ args = parser.parse_args()
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+ model = LibLlamaModel(LibLlama(), args.vocab_file, mparams=dict(vocab_only=True), cparams=dict(n_ctx=2048))
+
+ tokenizer = AutoTokenizer.from_pretrained(args.dir_tokenizer)
+
+ test_compare_tokenizer(model, tokenizer, generator_custom_text())
+ test_compare_tokenizer(model, tokenizer, generator_custom_text_edge_cases())
+ test_compare_tokenizer(model, tokenizer, generator_random_chars(10_000))
+ test_compare_tokenizer(model, tokenizer, generator_random_vocab_chars(tokenizer, 10_000))
+ test_compare_tokenizer(model, tokenizer, generator_random_vocab_tokens(tokenizer, 10_000))
+ # test_compare_tokenizer(model, tokenizer, generator_random_bytes(10_000)) # FAIL
+
+ model.free()
diff --git a/unicode-data.cpp b/unicode-data.cpp
index e6bafb3a9..c54175fc3 100644
--- a/unicode-data.cpp
+++ b/unicode-data.cpp
@@ -5,27 +5,46 @@
#include <unordered_map>
#include <unordered_set>
-const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_digit = {
-{0x00000030, 0x00000039}, {0x000000B2, 0x000000B3}, {0x000000B9, 0x000000B9}, {0x00000660, 0x00000669},
-{0x000006F0, 0x000006F9}, {0x000007C0, 0x000007C9}, {0x00000966, 0x0000096F}, {0x000009E6, 0x000009EF},
-{0x00000A66, 0x00000A6F}, {0x00000AE6, 0x00000AEF}, {0x00000B66, 0x00000B6F}, {0x00000BE6, 0x00000BEF},
-{0x00000C66, 0x00000C6F}, {0x00000CE6, 0x00000CEF}, {0x00000D66, 0x00000D6F}, {0x00000DE6, 0x00000DEF},
-{0x00000E50, 0x00000E59}, {0x00000ED0, 0x00000ED9}, {0x00000F20, 0x00000F29}, {0x00001040, 0x00001049},
-{0x00001090, 0x00001099}, {0x00001369, 0x00001371}, {0x000017E0, 0x000017E9}, {0x00001810, 0x00001819},
-{0x00001946, 0x0000194F}, {0x000019D0, 0x000019DA}, {0x00001A80, 0x00001A89}, {0x00001A90, 0x00001A99},
-{0x00001B50, 0x00001B59}, {0x00001BB0, 0x00001BB9}, {0x00001C40, 0x00001C49}, {0x00001C50, 0x00001C59},
-{0x00002070, 0x00002070}, {0x00002074, 0x00002079}, {0x00002080, 0x00002089}, {0x00002460, 0x00002468},
-{0x00002474, 0x0000247C}, {0x00002488, 0x00002490}, {0x000024EA, 0x000024EA}, {0x000024F5, 0x000024FD},
-{0x000024FF, 0x000024FF}, {0x00002776, 0x0000277E}, {0x00002780, 0x00002788}, {0x0000278A, 0x00002792},
-{0x0000A620, 0x0000A629}, {0x0000A8D0, 0x0000A8D9}, {0x0000A900, 0x0000A909}, {0x0000A9D0, 0x0000A9D9},
-{0x0000A9F0, 0x0000A9F9}, {0x0000AA50, 0x0000AA59}, {0x0000ABF0, 0x0000ABF9}, {0x0000FF10, 0x0000FF19},
-{0x000104A0, 0x000104A9}, {0x00010A40, 0x00010A43}, {0x00010D30, 0x00010D39}, {0x00010E60, 0x00010E68},
-{0x00011052, 0x0001105A}, {0x00011066, 0x0001106F}, {0x000110F0, 0x000110F9}, {0x00011136, 0x0001113F},
-{0x000111D0, 0x000111D9}, {0x000112F0, 0x000112F9}, {0x00011450, 0x00011459}, {0x000114D0, 0x000114D9},
-{0x00011650, 0x00011659}, {0x000116C0, 0x000116C9}, {0x00011730, 0x00011739}, {0x000118E0, 0x000118E9},
-{0x00011950, 0x00011959}, {0x00011C50, 0x00011C59}, {0x00011D50, 0x00011D59}, {0x00011DA0, 0x00011DA9},
-{0x00016A60, 0x00016A69}, {0x00016B50, 0x00016B59}, {0x0001D7CE, 0x0001D7FF}, {0x0001E140, 0x0001E149},
-{0x0001E2F0, 0x0001E2F9}, {0x0001E950, 0x0001E959}, {0x0001F100, 0x0001F10A}, {0x0001FBF0, 0x0001FBF9},
+// generated with scripts/gen-unicode-data.py
+//
+// TODO: generate unicode_map_nfd
+
+const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_number = {
+{0x00000030, 0x00000039}, {0x000000B2, 0x000000B3}, {0x000000B9, 0x000000B9}, {0x000000BC, 0x000000BE},
+{0x00000660, 0x00000669}, {0x000006F0, 0x000006F9}, {0x000007C0, 0x000007C9}, {0x00000966, 0x0000096F},
+{0x000009E6, 0x000009EF}, {0x000009F4, 0x000009F9}, {0x00000A66, 0x00000A6F}, {0x00000AE6, 0x00000AEF},
+{0x00000B66, 0x00000B6F}, {0x00000B72, 0x00000B77}, {0x00000BE6, 0x00000BF2}, {0x00000C66, 0x00000C6F},
+{0x00000C78, 0x00000C7E}, {0x00000CE6, 0x00000CEF}, {0x00000D58, 0x00000D5E}, {0x00000D66, 0x00000D78},
+{0x00000DE6, 0x00000DEF}, {0x00000E50, 0x00000E59}, {0x00000ED0, 0x00000ED9}, {0x00000F20, 0x00000F33},
+{0x00001040, 0x00001049}, {0x00001090, 0x00001099}, {0x00001369, 0x0000137C}, {0x000016EE, 0x000016F0},
+{0x000017E0, 0x000017E9}, {0x000017F0, 0x000017F9}, {0x00001810, 0x00001819}, {0x00001946, 0x0000194F},
+{0x000019D0, 0x000019DA}, {0x00001A80, 0x00001A89}, {0x00001A90, 0x00001A99}, {0x00001B50, 0x00001B59},
+{0x00001BB0, 0x00001BB9}, {0x00001C40, 0x00001C49}, {0x00001C50, 0x00001C59}, {0x00002070, 0x00002070},
+{0x00002074, 0x00002079}, {0x00002080, 0x00002089}, {0x00002150, 0x00002182}, {0x00002185, 0x00002189},
+{0x00002460, 0x0000249B}, {0x000024EA, 0x000024FF}, {0x00002776, 0x00002793}, {0x00002CFD, 0x00002CFD},
+{0x00003007, 0x00003007}, {0x00003021, 0x00003029}, {0x00003038, 0x0000303A}, {0x00003192, 0x00003195},
+{0x00003220, 0x00003229}, {0x00003248, 0x0000324F}, {0x00003251, 0x0000325F}, {0x00003280, 0x00003289},
+{0x000032B1, 0x000032BF}, {0x0000A620, 0x0000A629}, {0x0000A6E6, 0x0000A6EF}, {0x0000A830, 0x0000A835},
+{0x0000A8D0, 0x0000A8D9}, {0x0000A900, 0x0000A909}, {0x0000A9D0, 0x0000A9D9}, {0x0000A9F0, 0x0000A9F9},
+{0x0000AA50, 0x0000AA59}, {0x0000ABF0, 0x0000ABF9}, {0x0000FF10, 0x0000FF19}, {0x00010107, 0x00010133},
+{0x00010140, 0x00010178}, {0x0001018A, 0x0001018B}, {0x000102E1, 0x000102FB}, {0x00010320, 0x00010323},
+{0x00010341, 0x00010341}, {0x0001034A, 0x0001034A}, {0x000103D1, 0x000103D5}, {0x000104A0, 0x000104A9},
+{0x00010858, 0x0001085F}, {0x00010879, 0x0001087F}, {0x000108A7, 0x000108AF}, {0x000108FB, 0x000108FF},
+{0x00010916, 0x0001091B}, {0x000109BC, 0x000109BD}, {0x000109C0, 0x000109CF}, {0x000109D2, 0x000109FF},
+{0x00010A40, 0x00010A48}, {0x00010A7D, 0x00010A7E}, {0x00010A9D, 0x00010A9F}, {0x00010AEB, 0x00010AEF},
+{0x00010B58, 0x00010B5F}, {0x00010B78, 0x00010B7F}, {0x00010BA9, 0x00010BAF}, {0x00010CFA, 0x00010CFF},
+{0x00010D30, 0x00010D39}, {0x00010E60, 0x00010E7E}, {0x00010F1D, 0x00010F26}, {0x00010F51, 0x00010F54},
+{0x00010FC5, 0x00010FCB}, {0x00011052, 0x0001106F}, {0x000110F0, 0x000110F9}, {0x00011136, 0x0001113F},
+{0x000111D0, 0x000111D9}, {0x000111E1, 0x000111F4}, {0x000112F0, 0x000112F9}, {0x00011450, 0x00011459},
+{0x000114D0, 0x000114D9}, {0x00011650, 0x00011659}, {0x000116C0, 0x000116C9}, {0x00011730, 0x0001173B},
+{0x000118E0, 0x000118F2}, {0x00011950, 0x00011959}, {0x00011C50, 0x00011C6C}, {0x00011D50, 0x00011D59},
+{0x00011DA0, 0x00011DA9}, {0x00011F50, 0x00011F59}, {0x00011FC0, 0x00011FD4}, {0x00012400, 0x0001246E},
+{0x00016A60, 0x00016A69}, {0x00016AC0, 0x00016AC9}, {0x00016B50, 0x00016B59}, {0x00016B5B, 0x00016B61},
+{0x00016E80, 0x00016E96}, {0x0001D2C0, 0x0001D2D3}, {0x0001D2E0, 0x0001D2F3}, {0x0001D360, 0x0001D378},
+{0x0001D7CE, 0x0001D7FF}, {0x0001E140, 0x0001E149}, {0x0001E2F0, 0x0001E2F9}, {0x0001E4F0, 0x0001E4F9},
+{0x0001E8C7, 0x0001E8CF}, {0x0001E950, 0x0001E959}, {0x0001EC71, 0x0001ECAB}, {0x0001ECAD, 0x0001ECAF},
+{0x0001ECB1, 0x0001ECB4}, {0x0001ED01, 0x0001ED2D}, {0x0001ED2F, 0x0001ED3D}, {0x0001F100, 0x0001F10C},
+{0x0001FBF0, 0x0001FBF9},
};
const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_letter = {
@@ -41,73 +60,73 @@ const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_letter = {
{0x00000710, 0x00000710}, {0x00000712, 0x0000072F}, {0x0000074D, 0x000007A5}, {0x000007B1, 0x000007B1},
{0x000007CA, 0x000007EA}, {0x000007F4, 0x000007F5}, {0x000007FA, 0x000007FA}, {0x00000800, 0x00000815},
{0x0000081A, 0x0000081A}, {0x00000824, 0x00000824}, {0x00000828, 0x00000828}, {0x00000840, 0x00000858},
-{0x00000860, 0x0000086A}, {0x000008A0, 0x000008B4}, {0x000008B6, 0x000008C7}, {0x00000904, 0x00000939},
-{0x0000093D, 0x0000093D}, {0x00000950, 0x00000950}, {0x00000958, 0x00000961}, {0x00000971, 0x00000980},
-{0x00000985, 0x0000098C}, {0x0000098F, 0x00000990}, {0x00000993, 0x000009A8}, {0x000009AA, 0x000009B0},
-{0x000009B2, 0x000009B2}, {0x000009B6, 0x000009B9}, {0x000009BD, 0x000009BD}, {0x000009CE, 0x000009CE},
-{0x000009DC, 0x000009DD}, {0x000009DF, 0x000009E1}, {0x000009F0, 0x000009F1}, {0x000009FC, 0x000009FC},
-{0x00000A05, 0x00000A0A}, {0x00000A0F, 0x00000A10}, {0x00000A13, 0x00000A28}, {0x00000A2A, 0x00000A30},
-{0x00000A32, 0x00000A33}, {0x00000A35, 0x00000A36}, {0x00000A38, 0x00000A39}, {0x00000A59, 0x00000A5C},
-{0x00000A5E, 0x00000A5E}, {0x00000A72, 0x00000A74}, {0x00000A85, 0x00000A8D}, {0x00000A8F, 0x00000A91},
-{0x00000A93, 0x00000AA8}, {0x00000AAA, 0x00000AB0}, {0x00000AB2, 0x00000AB3}, {0x00000AB5, 0x00000AB9},
-{0x00000ABD, 0x00000ABD}, {0x00000AD0, 0x00000AD0}, {0x00000AE0, 0x00000AE1}, {0x00000AF9, 0x00000AF9},
-{0x00000B05, 0x00000B0C}, {0x00000B0F, 0x00000B10}, {0x00000B13, 0x00000B28}, {0x00000B2A, 0x00000B30},
-{0x00000B32, 0x00000B33}, {0x00000B35, 0x00000B39}, {0x00000B3D, 0x00000B3D}, {0x00000B5C, 0x00000B5D},
-{0x00000B5F, 0x00000B61}, {0x00000B71, 0x00000B71}, {0x00000B83, 0x00000B83}, {0x00000B85, 0x00000B8A},
-{0x00000B8E, 0x00000B90}, {0x00000B92, 0x00000B95}, {0x00000B99, 0x00000B9A}, {0x00000B9C, 0x00000B9C},
-{0x00000B9E, 0x00000B9F}, {0x00000BA3, 0x00000BA4}, {0x00000BA8, 0x00000BAA}, {0x00000BAE, 0x00000BB9},
-{0x00000BD0, 0x00000BD0}, {0x00000C05, 0x00000C0C}, {0x00000C0E, 0x00000C10}, {0x00000C12, 0x00000C28},
-{0x00000C2A, 0x00000C39}, {0x00000C3D, 0x00000C3D}, {0x00000C58, 0x00000C5A}, {0x00000C60, 0x00000C61},
-{0x00000C80, 0x00000C80}, {0x00000C85, 0x00000C8C}, {0x00000C8E, 0x00000C90}, {0x00000C92, 0x00000CA8},
-{0x00000CAA, 0x00000CB3}, {0x00000CB5, 0x00000CB9}, {0x00000CBD, 0x00000CBD}, {0x00000CDE, 0x00000CDE},
-{0x00000CE0, 0x00000CE1}, {0x00000CF1, 0x00000CF2}, {0x00000D04, 0x00000D0C}, {0x00000D0E, 0x00000D10},
-{0x00000D12, 0x00000D3A}, {0x00000D3D, 0x00000D3D}, {0x00000D4E, 0x00000D4E}, {0x00000D54, 0x00000D56},
-{0x00000D5F, 0x00000D61}, {0x00000D7A, 0x00000D7F}, {0x00000D85, 0x00000D96}, {0x00000D9A, 0x00000DB1},
-{0x00000DB3, 0x00000DBB}, {0x00000DBD, 0x00000DBD}, {0x00000DC0, 0x00000DC6}, {0x00000E01, 0x00000E30},
-{0x00000E32, 0x00000E33}, {0x00000E40, 0x00000E46}, {0x00000E81, 0x00000E82}, {0x00000E84, 0x00000E84},
-{0x00000E86, 0x00000E8A}, {0x00000E8C, 0x00000EA3}, {0x00000EA5, 0x00000EA5}, {0x00000EA7, 0x00000EB0},
-{0x00000EB2, 0x00000EB3}, {0x00000EBD, 0x00000EBD}, {0x00000EC0, 0x00000EC4}, {0x00000EC6, 0x00000EC6},
-{0x00000EDC, 0x00000EDF}, {0x00000F00, 0x00000F00}, {0x00000F40, 0x00000F47}, {0x00000F49, 0x00000F6C},
-{0x00000F88, 0x00000F8C}, {0x00001000, 0x0000102A}, {0x0000103F, 0x0000103F}, {0x00001050, 0x00001055},
-{0x0000105A, 0x0000105D}, {0x00001061, 0x00001061}, {0x00001065, 0x00001066}, {0x0000106E, 0x00001070},
-{0x00001075, 0x00001081}, {0x0000108E, 0x0000108E}, {0x000010A0, 0x000010C5}, {0x000010C7, 0x000010C7},
-{0x000010CD, 0x000010CD}, {0x000010D0, 0x000010FA}, {0x000010FC, 0x00001248}, {0x0000124A, 0x0000124D},
-{0x00001250, 0x00001256}, {0x00001258, 0x00001258}, {0x0000125A, 0x0000125D}, {0x00001260, 0x00001288},
-{0x0000128A, 0x0000128D}, {0x00001290, 0x000012B0}, {0x000012B2, 0x000012B5}, {0x000012B8, 0x000012BE},
-{0x000012C0, 0x000012C0}, {0x000012C2, 0x000012C5}, {0x000012C8, 0x000012D6}, {0x000012D8, 0x00001310},
-{0x00001312, 0x00001315}, {0x00001318, 0x0000135A}, {0x00001380, 0x0000138F}, {0x000013A0, 0x000013F5},
-{0x000013F8, 0x000013FD}, {0x00001401, 0x0000166C}, {0x0000166F, 0x0000167F}, {0x00001681, 0x0000169A},
-{0x000016A0, 0x000016EA}, {0x000016F1, 0x000016F8}, {0x00001700, 0x0000170C}, {0x0000170E, 0x00001711},
-{0x00001720, 0x00001731}, {0x00001740, 0x00001751}, {0x00001760, 0x0000176C}, {0x0000176E, 0x00001770},
-{0x00001780, 0x000017B3}, {0x000017D7, 0x000017D7}, {0x000017DC, 0x000017DC}, {0x00001820, 0x00001878},
-{0x00001880, 0x00001884}, {0x00001887, 0x000018A8}, {0x000018AA, 0x000018AA}, {0x000018B0, 0x000018F5},
-{0x00001900, 0x0000191E}, {0x00001950, 0x0000196D}, {0x00001970, 0x00001974}, {0x00001980, 0x000019AB},
-{0x000019B0, 0x000019C9}, {0x00001A00, 0x00001A16}, {0x00001A20, 0x00001A54}, {0x00001AA7, 0x00001AA7},
-{0x00001B05, 0x00001B33}, {0x00001B45, 0x00001B4B}, {0x00001B83, 0x00001BA0}, {0x00001BAE, 0x00001BAF},
-{0x00001BBA, 0x00001BE5}, {0x00001C00, 0x00001C23}, {0x00001C4D, 0x00001C4F}, {0x00001C5A, 0x00001C7D},
-{0x00001C80, 0x00001C88}, {0x00001C90, 0x00001CBA}, {0x00001CBD, 0x00001CBF}, {0x00001CE9, 0x00001CEC},
-{0x00001CEE, 0x00001CF3}, {0x00001CF5, 0x00001CF6}, {0x00001CFA, 0x00001CFA}, {0x00001D00, 0x00001DBF},
-{0x00001E00, 0x00001F15}, {0x00001F18, 0x00001F1D}, {0x00001F20, 0x00001F45}, {0x00001F48, 0x00001F4D},
-{0x00001F50, 0x00001F57}, {0x00001F59, 0x00001F59}, {0x00001F5B, 0x00001F5B}, {0x00001F5D, 0x00001F5D},
-{0x00001F5F, 0x00001F7D}, {0x00001F80, 0x00001FB4}, {0x00001FB6, 0x00001FBC}, {0x00001FBE, 0x00001FBE},
-{0x00001FC2, 0x00001FC4}, {0x00001FC6, 0x00001FCC}, {0x00001FD0, 0x00001FD3}, {0x00001FD6, 0x00001FDB},
-{0x00001FE0, 0x00001FEC}, {0x00001FF2, 0x00001FF4}, {0x00001FF6, 0x00001FFC}, {0x00002071, 0x00002071},
-{0x0000207F, 0x0000207F}, {0x00002090, 0x0000209C}, {0x00002102, 0x00002102}, {0x00002107, 0x00002107},
-{0x0000210A, 0x00002113}, {0x00002115, 0x00002115}, {0x00002119, 0x0000211D}, {0x00002124, 0x00002124},
-{0x00002126, 0x00002126}, {0x00002128, 0x00002128}, {0x0000212A, 0x0000212D}, {0x0000212F, 0x00002139},
-{0x0000213C, 0x0000213F}, {0x00002145, 0x00002149}, {0x0000214E, 0x0000214E}, {0x00002183, 0x00002184},
-{0x00002C00, 0x00002C2E}, {0x00002C30, 0x00002C5E}, {0x00002C60, 0x00002CE4}, {0x00002CEB, 0x00002CEE},
-{0x00002CF2, 0x00002CF3}, {0x00002D00, 0x00002D25}, {0x00002D27, 0x00002D27}, {0x00002D2D, 0x00002D2D},
-{0x00002D30, 0x00002D67}, {0x00002D6F, 0x00002D6F}, {0x00002D80, 0x00002D96}, {0x00002DA0, 0x00002DA6},
-{0x00002DA8, 0x00002DAE}, {0x00002DB0, 0x00002DB6}, {0x00002DB8, 0x00002DBE}, {0x00002DC0, 0x00002DC6},
-{0x00002DC8, 0x00002DCE}, {0x00002DD0, 0x00002DD6}, {0x00002DD8, 0x00002DDE}, {0x00002E2F, 0x00002E2F},
-{0x00003005, 0x00003006}, {0x00003031, 0x00003035}, {0x0000303B, 0x0000303C}, {0x00003041, 0x00003096},
-{0x0000309D, 0x0000309F}, {0x000030A1, 0x000030FA}, {0x000030FC, 0x000030FF}, {0x00003105, 0x0000312F},
-{0x00003131, 0x0000318E}, {0x000031A0, 0x000031BF}, {0x000031F0, 0x000031FF}, {0x00003400, 0x00004DBF},
-{0x00004E00, 0x00009FFC}, {0x0000A000, 0x0000A48C}, {0x0000A4D0, 0x0000A4FD}, {0x0000A500, 0x0000A60C},
-{0x0000A610, 0x0000A61F}, {0x0000A62A, 0x0000A62B}, {0x0000A640, 0x0000A66E}, {0x0000A67F, 0x0000A69D},
-{0x0000A6A0, 0x0000A6E5}, {0x0000A717, 0x0000A71F}, {0x0000A722, 0x0000A788}, {0x0000A78B, 0x0000A7BF},
-{0x0000A7C2, 0x0000A7CA}, {0x0000A7F5, 0x0000A801}, {0x0000A803, 0x0000A805}, {0x0000A807, 0x0000A80A},
+{0x00000860, 0x0000086A}, {0x00000870, 0x00000887}, {0x00000889, 0x0000088E}, {0x000008A0, 0x000008C9},
+{0x00000904, 0x00000939}, {0x0000093D, 0x0000093D}, {0x00000950, 0x00000950}, {0x00000958, 0x00000961},
+{0x00000971, 0x00000980}, {0x00000985, 0x0000098C}, {0x0000098F, 0x00000990}, {0x00000993, 0x000009A8},
+{0x000009AA, 0x000009B0}, {0x000009B2, 0x000009B2}, {0x000009B6, 0x000009B9}, {0x000009BD, 0x000009BD},
+{0x000009CE, 0x000009CE}, {0x000009DC, 0x000009DD}, {0x000009DF, 0x000009E1}, {0x000009F0, 0x000009F1},
+{0x000009FC, 0x000009FC}, {0x00000A05, 0x00000A0A}, {0x00000A0F, 0x00000A10}, {0x00000A13, 0x00000A28},
+{0x00000A2A, 0x00000A30}, {0x00000A32, 0x00000A33}, {0x00000A35, 0x00000A36}, {0x00000A38, 0x00000A39},
+{0x00000A59, 0x00000A5C}, {0x00000A5E, 0x00000A5E}, {0x00000A72, 0x00000A74}, {0x00000A85, 0x00000A8D},
+{0x00000A8F, 0x00000A91}, {0x00000A93, 0x00000AA8}, {0x00000AAA, 0x00000AB0}, {0x00000AB2, 0x00000AB3},
+{0x00000AB5, 0x00000AB9}, {0x00000ABD, 0x00000ABD}, {0x00000AD0, 0x00000AD0}, {0x00000AE0, 0x00000AE1},
+{0x00000AF9, 0x00000AF9}, {0x00000B05, 0x00000B0C}, {0x00000B0F, 0x00000B10}, {0x00000B13, 0x00000B28},
+{0x00000B2A, 0x00000B30}, {0x00000B32, 0x00000B33}, {0x00000B35, 0x00000B39}, {0x00000B3D, 0x00000B3D},
+{0x00000B5C, 0x00000B5D}, {0x00000B5F, 0x00000B61}, {0x00000B71, 0x00000B71}, {0x00000B83, 0x00000B83},
+{0x00000B85, 0x00000B8A}, {0x00000B8E, 0x00000B90}, {0x00000B92, 0x00000B95}, {0x00000B99, 0x00000B9A},
+{0x00000B9C, 0x00000B9C}, {0x00000B9E, 0x00000B9F}, {0x00000BA3, 0x00000BA4}, {0x00000BA8, 0x00000BAA},
+{0x00000BAE, 0x00000BB9}, {0x00000BD0, 0x00000BD0}, {0x00000C05, 0x00000C0C}, {0x00000C0E, 0x00000C10},
+{0x00000C12, 0x00000C28}, {0x00000C2A, 0x00000C39}, {0x00000C3D, 0x00000C3D}, {0x00000C58, 0x00000C5A},
+{0x00000C5D, 0x00000C5D}, {0x00000C60, 0x00000C61}, {0x00000C80, 0x00000C80}, {0x00000C85, 0x00000C8C},
+{0x00000C8E, 0x00000C90}, {0x00000C92, 0x00000CA8}, {0x00000CAA, 0x00000CB3}, {0x00000CB5, 0x00000CB9},
+{0x00000CBD, 0x00000CBD}, {0x00000CDD, 0x00000CDE}, {0x00000CE0, 0x00000CE1}, {0x00000CF1, 0x00000CF2},
+{0x00000D04, 0x00000D0C}, {0x00000D0E, 0x00000D10}, {0x00000D12, 0x00000D3A}, {0x00000D3D, 0x00000D3D},
+{0x00000D4E, 0x00000D4E}, {0x00000D54, 0x00000D56}, {0x00000D5F, 0x00000D61}, {0x00000D7A, 0x00000D7F},
+{0x00000D85, 0x00000D96}, {0x00000D9A, 0x00000DB1}, {0x00000DB3, 0x00000DBB}, {0x00000DBD, 0x00000DBD},
+{0x00000DC0, 0x00000DC6}, {0x00000E01, 0x00000E30}, {0x00000E32, 0x00000E33}, {0x00000E40, 0x00000E46},
+{0x00000E81, 0x00000E82}, {0x00000E84, 0x00000E84}, {0x00000E86, 0x00000E8A}, {0x00000E8C, 0x00000EA3},
+{0x00000EA5, 0x00000EA5}, {0x00000EA7, 0x00000EB0}, {0x00000EB2, 0x00000EB3}, {0x00000EBD, 0x00000EBD},
+{0x00000EC0, 0x00000EC4}, {0x00000EC6, 0x00000EC6}, {0x00000EDC, 0x00000EDF}, {0x00000F00, 0x00000F00},
+{0x00000F40, 0x00000F47}, {0x00000F49, 0x00000F6C}, {0x00000F88, 0x00000F8C}, {0x00001000, 0x0000102A},
+{0x0000103F, 0x0000103F}, {0x00001050, 0x00001055}, {0x0000105A, 0x0000105D}, {0x00001061, 0x00001061},
+{0x00001065, 0x00001066}, {0x0000106E, 0x00001070}, {0x00001075, 0x00001081}, {0x0000108E, 0x0000108E},
+{0x000010A0, 0x000010C5}, {0x000010C7, 0x000010C7}, {0x000010CD, 0x000010CD}, {0x000010D0, 0x000010FA},
+{0x000010FC, 0x00001248}, {0x0000124A, 0x0000124D}, {0x00001250, 0x00001256}, {0x00001258, 0x00001258},
+{0x0000125A, 0x0000125D}, {0x00001260, 0x00001288}, {0x0000128A, 0x0000128D}, {0x00001290, 0x000012B0},
+{0x000012B2, 0x000012B5}, {0x000012B8, 0x000012BE}, {0x000012C0, 0x000012C0}, {0x000012C2, 0x000012C5},
+{0x000012C8, 0x000012D6}, {0x000012D8, 0x00001310}, {0x00001312, 0x00001315}, {0x00001318, 0x0000135A},
+{0x00001380, 0x0000138F}, {0x000013A0, 0x000013F5}, {0x000013F8, 0x000013FD}, {0x00001401, 0x0000166C},
+{0x0000166F, 0x0000167F}, {0x00001681, 0x0000169A}, {0x000016A0, 0x000016EA}, {0x000016F1, 0x000016F8},
+{0x00001700, 0x00001711}, {0x0000171F, 0x00001731}, {0x00001740, 0x00001751}, {0x00001760, 0x0000176C},
+{0x0000176E, 0x00001770}, {0x00001780, 0x000017B3}, {0x000017D7, 0x000017D7}, {0x000017DC, 0x000017DC},
+{0x00001820, 0x00001878}, {0x00001880, 0x00001884}, {0x00001887, 0x000018A8}, {0x000018AA, 0x000018AA},
+{0x000018B0, 0x000018F5}, {0x00001900, 0x0000191E}, {0x00001950, 0x0000196D}, {0x00001970, 0x00001974},
+{0x00001980, 0x000019AB}, {0x000019B0, 0x000019C9}, {0x00001A00, 0x00001A16}, {0x00001A20, 0x00001A54},
+{0x00001AA7, 0x00001AA7}, {0x00001B05, 0x00001B33}, {0x00001B45, 0x00001B4C}, {0x00001B83, 0x00001BA0},
+{0x00001BAE, 0x00001BAF}, {0x00001BBA, 0x00001BE5}, {0x00001C00, 0x00001C23}, {0x00001C4D, 0x00001C4F},
+{0x00001C5A, 0x00001C7D}, {0x00001C80, 0x00001C88}, {0x00001C90, 0x00001CBA}, {0x00001CBD, 0x00001CBF},
+{0x00001CE9, 0x00001CEC}, {0x00001CEE, 0x00001CF3}, {0x00001CF5, 0x00001CF6}, {0x00001CFA, 0x00001CFA},
+{0x00001D00, 0x00001DBF}, {0x00001E00, 0x00001F15}, {0x00001F18, 0x00001F1D}, {0x00001F20, 0x00001F45},
+{0x00001F48, 0x00001F4D}, {0x00001F50, 0x00001F57}, {0x00001F59, 0x00001F59}, {0x00001F5B, 0x00001F5B},
+{0x00001F5D, 0x00001F5D}, {0x00001F5F, 0x00001F7D}, {0x00001F80, 0x00001FB4}, {0x00001FB6, 0x00001FBC},
+{0x00001FBE, 0x00001FBE}, {0x00001FC2, 0x00001FC4}, {0x00001FC6, 0x00001FCC}, {0x00001FD0, 0x00001FD3},
+{0x00001FD6, 0x00001FDB}, {0x00001FE0, 0x00001FEC}, {0x00001FF2, 0x00001FF4}, {0x00001FF6, 0x00001FFC},
+{0x00002071, 0x00002071}, {0x0000207F, 0x0000207F}, {0x00002090, 0x0000209C}, {0x00002102, 0x00002102},
+{0x00002107, 0x00002107}, {0x0000210A, 0x00002113}, {0x00002115, 0x00002115}, {0x00002119, 0x0000211D},
+{0x00002124, 0x00002124}, {0x00002126, 0x00002126}, {0x00002128, 0x00002128}, {0x0000212A, 0x0000212D},
+{0x0000212F, 0x00002139}, {0x0000213C, 0x0000213F}, {0x00002145, 0x00002149}, {0x0000214E, 0x0000214E},
+{0x00002183, 0x00002184}, {0x00002C00, 0x00002CE4}, {0x00002CEB, 0x00002CEE}, {0x00002CF2, 0x00002CF3},
+{0x00002D00, 0x00002D25}, {0x00002D27, 0x00002D27}, {0x00002D2D, 0x00002D2D}, {0x00002D30, 0x00002D67},
+{0x00002D6F, 0x00002D6F}, {0x00002D80, 0x00002D96}, {0x00002DA0, 0x00002DA6}, {0x00002DA8, 0x00002DAE},
+{0x00002DB0, 0x00002DB6}, {0x00002DB8, 0x00002DBE}, {0x00002DC0, 0x00002DC6}, {0x00002DC8, 0x00002DCE},
+{0x00002DD0, 0x00002DD6}, {0x00002DD8, 0x00002DDE}, {0x00002E2F, 0x00002E2F}, {0x00003005, 0x00003006},
+{0x00003031, 0x00003035}, {0x0000303B, 0x0000303C}, {0x00003041, 0x00003096}, {0x0000309D, 0x0000309F},
+{0x000030A1, 0x000030FA}, {0x000030FC, 0x000030FF}, {0x00003105, 0x0000312F}, {0x00003131, 0x0000318E},
+{0x000031A0, 0x000031BF}, {0x000031F0, 0x000031FF}, {0x00003400, 0x00004DBF}, {0x00004E00, 0x0000A48C},
+{0x0000A4D0, 0x0000A4FD}, {0x0000A500, 0x0000A60C}, {0x0000A610, 0x0000A61F}, {0x0000A62A, 0x0000A62B},
+{0x0000A640, 0x0000A66E}, {0x0000A67F, 0x0000A69D}, {0x0000A6A0, 0x0000A6E5}, {0x0000A717, 0x0000A71F},
+{0x0000A722, 0x0000A788}, {0x0000A78B, 0x0000A7CA}, {0x0000A7D0, 0x0000A7D1}, {0x0000A7D3, 0x0000A7D3},
+{0x0000A7D5, 0x0000A7D9}, {0x0000A7F2, 0x0000A801}, {0x0000A803, 0x0000A805}, {0x0000A807, 0x0000A80A},
{0x0000A80C, 0x0000A822}, {0x0000A840, 0x0000A873}, {0x0000A882, 0x0000A8B3}, {0x0000A8F2, 0x0000A8F7},
{0x0000A8FB, 0x0000A8FB}, {0x0000A8FD, 0x0000A8FE}, {0x0000A90A, 0x0000A925}, {0x0000A930, 0x0000A946},
{0x0000A960, 0x0000A97C}, {0x0000A984, 0x0000A9B2}, {0x0000A9CF, 0x0000A9CF}, {0x0000A9E0, 0x0000A9E4},
@@ -129,51 +148,60 @@ const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_letter = {
{0x000102A0, 0x000102D0}, {0x00010300, 0x0001031F}, {0x0001032D, 0x00010340}, {0x00010342, 0x00010349},
{0x00010350, 0x00010375}, {0x00010380, 0x0001039D}, {0x000103A0, 0x000103C3}, {0x000103C8, 0x000103CF},
{0x00010400, 0x0001049D}, {0x000104B0, 0x000104D3}, {0x000104D8, 0x000104FB}, {0x00010500, 0x00010527},
-{0x00010530, 0x00010563}, {0x00010600, 0x00010736}, {0x00010740, 0x00010755}, {0x00010760, 0x00010767},
-{0x00010800, 0x00010805}, {0x00010808, 0x00010808}, {0x0001080A, 0x00010835}, {0x00010837, 0x00010838},
-{0x0001083C, 0x0001083C}, {0x0001083F, 0x00010855}, {0x00010860, 0x00010876}, {0x00010880, 0x0001089E},
-{0x000108E0, 0x000108F2}, {0x000108F4, 0x000108F5}, {0x00010900, 0x00010915}, {0x00010920, 0x00010939},
-{0x00010980, 0x000109B7}, {0x000109BE, 0x000109BF}, {0x00010A00, 0x00010A00}, {0x00010A10, 0x00010A13},
-{0x00010A15, 0x00010A17}, {0x00010A19, 0x00010A35}, {0x00010A60, 0x00010A7C}, {0x00010A80, 0x00010A9C},
-{0x00010AC0, 0x00010AC7}, {0x00010AC9, 0x00010AE4}, {0x00010B00, 0x00010B35}, {0x00010B40, 0x00010B55},
-{0x00010B60, 0x00010B72}, {0x00010B80, 0x00010B91}, {0x00010C00, 0x00010C48}, {0x00010C80, 0x00010CB2},
-{0x00010CC0, 0x00010CF2}, {0x00010D00, 0x00010D23}, {0x00010E80, 0x00010EA9}, {0x00010EB0, 0x00010EB1},
-{0x00010F00, 0x00010F1C}, {0x00010F27, 0x00010F27}, {0x00010F30, 0x00010F45}, {0x00010FB0, 0x00010FC4},
-{0x00010FE0, 0x00010FF6}, {0x00011003, 0x00011037}, {0x00011083, 0x000110AF}, {0x000110D0, 0x000110E8},
-{0x00011103, 0x00011126}, {0x00011144, 0x00011144}, {0x00011147, 0x00011147}, {0x00011150, 0x00011172},
-{0x00011176, 0x00011176}, {0x00011183, 0x000111B2}, {0x000111C1, 0x000111C4}, {0x000111DA, 0x000111DA},
-{0x000111DC, 0x000111DC}, {0x00011200, 0x00011211}, {0x00011213, 0x0001122B}, {0x00011280, 0x00011286},
-{0x00011288, 0x00011288}, {0x0001128A, 0x0001128D}, {0x0001128F, 0x0001129D}, {0x0001129F, 0x000112A8},
-{0x000112B0, 0x000112DE}, {0x00011305, 0x0001130C}, {0x0001130F, 0x00011310}, {0x00011313, 0x00011328},
-{0x0001132A, 0x00011330}, {0x00011332, 0x00011333}, {0x00011335, 0x00011339}, {0x0001133D, 0x0001133D},
-{0x00011350, 0x00011350}, {0x0001135D, 0x00011361}, {0x00011400, 0x00011434}, {0x00011447, 0x0001144A},
-{0x0001145F, 0x00011461}, {0x00011480, 0x000114AF}, {0x000114C4, 0x000114C5}, {0x000114C7, 0x000114C7},
-{0x00011580, 0x000115AE}, {0x000115D8, 0x000115DB}, {0x00011600, 0x0001162F}, {0x00011644, 0x00011644},
-{0x00011680, 0x000116AA}, {0x000116B8, 0x000116B8}, {0x00011700, 0x0001171A}, {0x00011800, 0x0001182B},
+{0x00010530, 0x00010563}, {0x00010570, 0x0001057A}, {0x0001057C, 0x0001058A}, {0x0001058C, 0x00010592},
+{0x00010594, 0x00010595}, {0x00010597, 0x000105A1}, {0x000105A3, 0x000105B1}, {0x000105B3, 0x000105B9},
+{0x000105BB, 0x000105BC}, {0x00010600, 0x00010736}, {0x00010740, 0x00010755}, {0x00010760, 0x00010767},
+{0x00010780, 0x00010785}, {0x00010787, 0x000107B0}, {0x000107B2, 0x000107BA}, {0x00010800, 0x00010805},
+{0x00010808, 0x00010808}, {0x0001080A, 0x00010835}, {0x00010837, 0x00010838}, {0x0001083C, 0x0001083C},
+{0x0001083F, 0x00010855}, {0x00010860, 0x00010876}, {0x00010880, 0x0001089E}, {0x000108E0, 0x000108F2},
+{0x000108F4, 0x000108F5}, {0x00010900, 0x00010915}, {0x00010920, 0x00010939}, {0x00010980, 0x000109B7},
+{0x000109BE, 0x000109BF}, {0x00010A00, 0x00010A00}, {0x00010A10, 0x00010A13}, {0x00010A15, 0x00010A17},
+{0x00010A19, 0x00010A35}, {0x00010A60, 0x00010A7C}, {0x00010A80, 0x00010A9C}, {0x00010AC0, 0x00010AC7},
+{0x00010AC9, 0x00010AE4}, {0x00010B00, 0x00010B35}, {0x00010B40, 0x00010B55}, {0x00010B60, 0x00010B72},
+{0x00010B80, 0x00010B91}, {0x00010C00, 0x00010C48}, {0x00010C80, 0x00010CB2}, {0x00010CC0, 0x00010CF2},
+{0x00010D00, 0x00010D23}, {0x00010E80, 0x00010EA9}, {0x00010EB0, 0x00010EB1}, {0x00010F00, 0x00010F1C},
+{0x00010F27, 0x00010F27}, {0x00010F30, 0x00010F45}, {0x00010F70, 0x00010F81}, {0x00010FB0, 0x00010FC4},
+{0x00010FE0, 0x00010FF6}, {0x00011003, 0x00011037}, {0x00011071, 0x00011072}, {0x00011075, 0x00011075},
+{0x00011083, 0x000110AF}, {0x000110D0, 0x000110E8}, {0x00011103, 0x00011126}, {0x00011144, 0x00011144},
+{0x00011147, 0x00011147}, {0x00011150, 0x00011172}, {0x00011176, 0x00011176}, {0x00011183, 0x000111B2},
+{0x000111C1, 0x000111C4}, {0x000111DA, 0x000111DA}, {0x000111DC, 0x000111DC}, {0x00011200, 0x00011211},
+{0x00011213, 0x0001122B}, {0x0001123F, 0x00011240}, {0x00011280, 0x00011286}, {0x00011288, 0x00011288},
+{0x0001128A, 0x0001128D}, {0x0001128F, 0x0001129D}, {0x0001129F, 0x000112A8}, {0x000112B0, 0x000112DE},
+{0x00011305, 0x0001130C}, {0x0001130F, 0x00011310}, {0x00011313, 0x00011328}, {0x0001132A, 0x00011330},
+{0x00011332, 0x00011333}, {0x00011335, 0x00011339}, {0x0001133D, 0x0001133D}, {0x00011350, 0x00011350},
+{0x0001135D, 0x00011361}, {0x00011400, 0x00011434}, {0x00011447, 0x0001144A}, {0x0001145F, 0x00011461},
+{0x00011480, 0x000114AF}, {0x000114C4, 0x000114C5}, {0x000114C7, 0x000114C7}, {0x00011580, 0x000115AE},
+{0x000115D8, 0x000115DB}, {0x00011600, 0x0001162F}, {0x00011644, 0x00011644}, {0x00011680, 0x000116AA},
+{0x000116B8, 0x000116B8}, {0x00011700, 0x0001171A}, {0x00011740, 0x00011746}, {0x00011800, 0x0001182B},
{0x000118A0, 0x000118DF}, {0x000118FF, 0x00011906}, {0x00011909, 0x00011909}, {0x0001190C, 0x00011913},
{0x00011915, 0x00011916}, {0x00011918, 0x0001192F}, {0x0001193F, 0x0001193F}, {0x00011941, 0x00011941},
{0x000119A0, 0x000119A7}, {0x000119AA, 0x000119D0}, {0x000119E1, 0x000119E1}, {0x000119E3, 0x000119E3},
{0x00011A00, 0x00011A00}, {0x00011A0B, 0x00011A32}, {0x00011A3A, 0x00011A3A}, {0x00011A50, 0x00011A50},
-{0x00011A5C, 0x00011A89}, {0x00011A9D, 0x00011A9D}, {0x00011AC0, 0x00011AF8}, {0x00011C00, 0x00011C08},
+{0x00011A5C, 0x00011A89}, {0x00011A9D, 0x00011A9D}, {0x00011AB0, 0x00011AF8}, {0x00011C00, 0x00011C08},
{0x00011C0A, 0x00011C2E}, {0x00011C40, 0x00011C40}, {0x00011C72, 0x00011C8F}, {0x00011D00, 0x00011D06},
{0x00011D08, 0x00011D09}, {0x00011D0B, 0x00011D30}, {0x00011D46, 0x00011D46}, {0x00011D60, 0x00011D65},
{0x00011D67, 0x00011D68}, {0x00011D6A, 0x00011D89}, {0x00011D98, 0x00011D98}, {0x00011EE0, 0x00011EF2},
-{0x00011FB0, 0x00011FB0}, {0x00012000, 0x00012399}, {0x00012480, 0x00012543}, {0x00013000, 0x0001342E},
-{0x00014400, 0x00014646}, {0x00016800, 0x00016A38}, {0x00016A40, 0x00016A5E}, {0x00016AD0, 0x00016AED},
-{0x00016B00, 0x00016B2F}, {0x00016B40, 0x00016B43}, {0x00016B63, 0x00016B77}, {0x00016B7D, 0x00016B8F},
-{0x00016E40, 0x00016E7F}, {0x00016F00, 0x00016F4A}, {0x00016F50, 0x00016F50}, {0x00016F93, 0x00016F9F},
-{0x00016FE0, 0x00016FE1}, {0x00016FE3, 0x00016FE3}, {0x00017000, 0x000187F7}, {0x00018800, 0x00018CD5},
-{0x00018D00, 0x00018D08}, {0x0001B000, 0x0001B11E}, {0x0001B150, 0x0001B152}, {0x0001B164, 0x0001B167},
-{0x0001B170, 0x0001B2FB}, {0x0001BC00, 0x0001BC6A}, {0x0001BC70, 0x0001BC7C}, {0x0001BC80, 0x0001BC88},
-{0x0001BC90, 0x0001BC99}, {0x0001D400, 0x0001D454}, {0x0001D456, 0x0001D49C}, {0x0001D49E, 0x0001D49F},
-{0x0001D4A2, 0x0001D4A2}, {0x0001D4A5, 0x0001D4A6}, {0x0001D4A9, 0x0001D4AC}, {0x0001D4AE, 0x0001D4B9},
-{0x0001D4BB, 0x0001D4BB}, {0x0001D4BD, 0x0001D4C3}, {0x0001D4C5, 0x0001D505}, {0x0001D507, 0x0001D50A},
-{0x0001D50D, 0x0001D514}, {0x0001D516, 0x0001D51C}, {0x0001D51E, 0x0001D539}, {0x0001D53B, 0x0001D53E},
-{0x0001D540, 0x0001D544}, {0x0001D546, 0x0001D546}, {0x0001D54A, 0x0001D550}, {0x0001D552, 0x0001D6A5},
-{0x0001D6A8, 0x0001D6C0}, {0x0001D6C2, 0x0001D6DA}, {0x0001D6DC, 0x0001D6FA}, {0x0001D6FC, 0x0001D714},
-{0x0001D716, 0x0001D734}, {0x0001D736, 0x0001D74E}, {0x0001D750, 0x0001D76E}, {0x0001D770, 0x0001D788},
-{0x0001D78A, 0x0001D7A8}, {0x0001D7AA, 0x0001D7C2}, {0x0001D7C4, 0x0001D7CB}, {0x0001E100, 0x0001E12C},
-{0x0001E137, 0x0001E13D}, {0x0001E14E, 0x0001E14E}, {0x0001E2C0, 0x0001E2EB}, {0x0001E800, 0x0001E8C4},
+{0x00011F02, 0x00011F02}, {0x00011F04, 0x00011F10}, {0x00011F12, 0x00011F33}, {0x00011FB0, 0x00011FB0},
+{0x00012000, 0x00012399}, {0x00012480, 0x00012543}, {0x00012F90, 0x00012FF0}, {0x00013000, 0x0001342F},
+{0x00013441, 0x00013446}, {0x00014400, 0x00014646}, {0x00016800, 0x00016A38}, {0x00016A40, 0x00016A5E},
+{0x00016A70, 0x00016ABE}, {0x00016AD0, 0x00016AED}, {0x00016B00, 0x00016B2F}, {0x00016B40, 0x00016B43},
+{0x00016B63, 0x00016B77}, {0x00016B7D, 0x00016B8F}, {0x00016E40, 0x00016E7F}, {0x00016F00, 0x00016F4A},
+{0x00016F50, 0x00016F50}, {0x00016F93, 0x00016F9F}, {0x00016FE0, 0x00016FE1}, {0x00016FE3, 0x00016FE3},
+{0x00017000, 0x000187F7}, {0x00018800, 0x00018CD5}, {0x00018D00, 0x00018D08}, {0x0001AFF0, 0x0001AFF3},
+{0x0001AFF5, 0x0001AFFB}, {0x0001AFFD, 0x0001AFFE}, {0x0001B000, 0x0001B122}, {0x0001B132, 0x0001B132},
+{0x0001B150, 0x0001B152}, {0x0001B155, 0x0001B155}, {0x0001B164, 0x0001B167}, {0x0001B170, 0x0001B2FB},
+{0x0001BC00, 0x0001BC6A}, {0x0001BC70, 0x0001BC7C}, {0x0001BC80, 0x0001BC88}, {0x0001BC90, 0x0001BC99},
+{0x0001D400, 0x0001D454}, {0x0001D456, 0x0001D49C}, {0x0001D49E, 0x0001D49F}, {0x0001D4A2, 0x0001D4A2},
+{0x0001D4A5, 0x0001D4A6}, {0x0001D4A9, 0x0001D4AC}, {0x0001D4AE, 0x0001D4B9}, {0x0001D4BB, 0x0001D4BB},
+{0x0001D4BD, 0x0001D4C3}, {0x0001D4C5, 0x0001D505}, {0x0001D507, 0x0001D50A}, {0x0001D50D, 0x0001D514},
+{0x0001D516, 0x0001D51C}, {0x0001D51E, 0x0001D539}, {0x0001D53B, 0x0001D53E}, {0x0001D540, 0x0001D544},
+{0x0001D546, 0x0001D546}, {0x0001D54A, 0x0001D550}, {0x0001D552, 0x0001D6A5}, {0x0001D6A8, 0x0001D6C0},
+{0x0001D6C2, 0x0001D6DA}, {0x0001D6DC, 0x0001D6FA}, {0x0001D6FC, 0x0001D714}, {0x0001D716, 0x0001D734},
+{0x0001D736, 0x0001D74E}, {0x0001D750, 0x0001D76E}, {0x0001D770, 0x0001D788}, {0x0001D78A, 0x0001D7A8},
+{0x0001D7AA, 0x0001D7C2}, {0x0001D7C4, 0x0001D7CB}, {0x0001DF00, 0x0001DF1E}, {0x0001DF25, 0x0001DF2A},
+{0x0001E030, 0x0001E06D}, {0x0001E100, 0x0001E12C}, {0x0001E137, 0x0001E13D}, {0x0001E14E, 0x0001E14E},
+{0x0001E290, 0x0001E2AD}, {0x0001E2C0, 0x0001E2EB}, {0x0001E4D0, 0x0001E4EB}, {0x0001E7E0, 0x0001E7E6},
+{0x0001E7E8, 0x0001E7EB}, {0x0001E7ED, 0x0001E7EE}, {0x0001E7F0, 0x0001E7FE}, {0x0001E800, 0x0001E8C4},
{0x0001E900, 0x0001E943}, {0x0001E94B, 0x0001E94B}, {0x0001EE00, 0x0001EE03}, {0x0001EE05, 0x0001EE1F},
{0x0001EE21, 0x0001EE22}, {0x0001EE24, 0x0001EE24}, {0x0001EE27, 0x0001EE27}, {0x0001EE29, 0x0001EE32},
{0x0001EE34, 0x0001EE37}, {0x0001EE39, 0x0001EE39}, {0x0001EE3B, 0x0001EE3B}, {0x0001EE42, 0x0001EE42},
@@ -182,13 +210,18 @@ const std::vector> unicode_ranges_letter = {
{0x0001EE5B, 0x0001EE5B}, {0x0001EE5D, 0x0001EE5D}, {0x0001EE5F, 0x0001EE5F}, {0x0001EE61, 0x0001EE62},
{0x0001EE64, 0x0001EE64}, {0x0001EE67, 0x0001EE6A}, {0x0001EE6C, 0x0001EE72}, {0x0001EE74, 0x0001EE77},
{0x0001EE79, 0x0001EE7C}, {0x0001EE7E, 0x0001EE7E}, {0x0001EE80, 0x0001EE89}, {0x0001EE8B, 0x0001EE9B},
-{0x0001EEA1, 0x0001EEA3}, {0x0001EEA5, 0x0001EEA9}, {0x0001EEAB, 0x0001EEBB}, {0x00020000, 0x0002A6DD},
-{0x0002A700, 0x0002B734}, {0x0002B740, 0x0002B81D}, {0x0002B820, 0x0002CEA1}, {0x0002CEB0, 0x0002EBE0},
-{0x0002F800, 0x0002FA1D}, {0x00030000, 0x0003134A},
+{0x0001EEA1, 0x0001EEA3}, {0x0001EEA5, 0x0001EEA9}, {0x0001EEAB, 0x0001EEBB}, {0x00020000, 0x0002A6DF},
+{0x0002A700, 0x0002B739}, {0x0002B740, 0x0002B81D}, {0x0002B820, 0x0002CEA1}, {0x0002CEB0, 0x0002EBE0},
+{0x0002EBF0, 0x0002EE5D}, {0x0002F800, 0x0002FA1D}, {0x00030000, 0x0003134A}, {0x00031350, 0x000323AF},
+};
+
+const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_separator = {
+{0x00000020, 0x00000020}, {0x000000A0, 0x000000A0}, {0x00001680, 0x00001680}, {0x00002000, 0x0000200A},
+{0x00002028, 0x00002029}, {0x0000202F, 0x0000202F}, {0x0000205F, 0x0000205F}, {0x00003000, 0x00003000},
};
const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_whitespace = {
-{0x00000009, 0x0000000D}, {0x0000001C, 0x00000020}, {0x00000085, 0x00000085}, {0x000000A0, 0x000000A0},
+{0x00000009, 0x0000000D}, {0x00000020, 0x00000020}, {0x00000085, 0x00000085}, {0x000000A0, 0x000000A0},
{0x00001680, 0x00001680}, {0x00002000, 0x0000200A}, {0x00002028, 0x00002029}, {0x0000202F, 0x0000202F},
{0x0000205F, 0x0000205F}, {0x00003000, 0x00003000},
};
@@ -200,72 +233,77 @@ const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_accent_mark = {
{0x000006E7, 0x000006E8}, {0x000006EA, 0x000006ED}, {0x00000711, 0x00000711}, {0x00000730, 0x0000074A},
{0x000007A6, 0x000007B0}, {0x000007EB, 0x000007F3}, {0x000007FD, 0x000007FD}, {0x00000816, 0x00000819},
{0x0000081B, 0x00000823}, {0x00000825, 0x00000827}, {0x00000829, 0x0000082D}, {0x00000859, 0x0000085B},
-{0x000008D3, 0x000008E1}, {0x000008E3, 0x00000903}, {0x0000093A, 0x0000093C}, {0x0000093E, 0x0000094F},
-{0x00000951, 0x00000957}, {0x00000962, 0x00000963}, {0x00000981, 0x00000983}, {0x000009BC, 0x000009BC},
-{0x000009BE, 0x000009C4}, {0x000009C7, 0x000009C8}, {0x000009CB, 0x000009CD}, {0x000009D7, 0x000009D7},
-{0x000009E2, 0x000009E3}, {0x000009FE, 0x000009FE}, {0x00000A01, 0x00000A03}, {0x00000A3C, 0x00000A3C},
-{0x00000A3E, 0x00000A42}, {0x00000A47, 0x00000A48}, {0x00000A4B, 0x00000A4D}, {0x00000A51, 0x00000A51},
-{0x00000A70, 0x00000A71}, {0x00000A75, 0x00000A75}, {0x00000A81, 0x00000A83}, {0x00000ABC, 0x00000ABC},
-{0x00000ABE, 0x00000AC5}, {0x00000AC7, 0x00000AC9}, {0x00000ACB, 0x00000ACD}, {0x00000AE2, 0x00000AE3},
-{0x00000AFA, 0x00000AFF}, {0x00000B01, 0x00000B03}, {0x00000B3C, 0x00000B3C}, {0x00000B3E, 0x00000B44},
-{0x00000B47, 0x00000B48}, {0x00000B4B, 0x00000B4D}, {0x00000B55, 0x00000B57}, {0x00000B62, 0x00000B63},
-{0x00000B82, 0x00000B82}, {0x00000BBE, 0x00000BC2}, {0x00000BC6, 0x00000BC8}, {0x00000BCA, 0x00000BCD},
-{0x00000BD7, 0x00000BD7}, {0x00000C00, 0x00000C04}, {0x00000C3E, 0x00000C44}, {0x00000C46, 0x00000C48},
-{0x00000C4A, 0x00000C4D}, {0x00000C55, 0x00000C56}, {0x00000C62, 0x00000C63}, {0x00000C81, 0x00000C83},
-{0x00000CBC, 0x00000CBC}, {0x00000CBE, 0x00000CC4}, {0x00000CC6, 0x00000CC8}, {0x00000CCA, 0x00000CCD},
-{0x00000CD5, 0x00000CD6}, {0x00000CE2, 0x00000CE3}, {0x00000D00, 0x00000D03}, {0x00000D3B, 0x00000D3C},
-{0x00000D3E, 0x00000D44}, {0x00000D46, 0x00000D48}, {0x00000D4A, 0x00000D4D}, {0x00000D57, 0x00000D57},
-{0x00000D62, 0x00000D63}, {0x00000D81, 0x00000D83}, {0x00000DCA, 0x00000DCA}, {0x00000DCF, 0x00000DD4},
-{0x00000DD6, 0x00000DD6}, {0x00000DD8, 0x00000DDF}, {0x00000DF2, 0x00000DF3}, {0x00000E31, 0x00000E31},
-{0x00000E34, 0x00000E3A}, {0x00000E47, 0x00000E4E}, {0x00000EB1, 0x00000EB1}, {0x00000EB4, 0x00000EBC},
-{0x00000EC8, 0x00000ECD}, {0x00000F18, 0x00000F19}, {0x00000F35, 0x00000F35}, {0x00000F37, 0x00000F37},
-{0x00000F39, 0x00000F39}, {0x00000F3E, 0x00000F3F}, {0x00000F71, 0x00000F84}, {0x00000F86, 0x00000F87},
-{0x00000F8D, 0x00000F97}, {0x00000F99, 0x00000FBC}, {0x00000FC6, 0x00000FC6}, {0x0000102B, 0x0000103E},
-{0x00001056, 0x00001059}, {0x0000105E, 0x00001060}, {0x00001062, 0x00001064}, {0x00001067, 0x0000106D},
-{0x00001071, 0x00001074}, {0x00001082, 0x0000108D}, {0x0000108F, 0x0000108F}, {0x0000109A, 0x0000109D},
-{0x0000135D, 0x0000135F}, {0x00001712, 0x00001714}, {0x00001732, 0x00001734}, {0x00001752, 0x00001753},
-{0x00001772, 0x00001773}, {0x000017B4, 0x000017D3}, {0x000017DD, 0x000017DD}, {0x0000180B, 0x0000180D},
+{0x00000898, 0x0000089F}, {0x000008CA, 0x000008E1}, {0x000008E3, 0x00000903}, {0x0000093A, 0x0000093C},
+{0x0000093E, 0x0000094F}, {0x00000951, 0x00000957}, {0x00000962, 0x00000963}, {0x00000981, 0x00000983},
+{0x000009BC, 0x000009BC}, {0x000009BE, 0x000009C4}, {0x000009C7, 0x000009C8}, {0x000009CB, 0x000009CD},
+{0x000009D7, 0x000009D7}, {0x000009E2, 0x000009E3}, {0x000009FE, 0x000009FE}, {0x00000A01, 0x00000A03},
+{0x00000A3C, 0x00000A3C}, {0x00000A3E, 0x00000A42}, {0x00000A47, 0x00000A48}, {0x00000A4B, 0x00000A4D},
+{0x00000A51, 0x00000A51}, {0x00000A70, 0x00000A71}, {0x00000A75, 0x00000A75}, {0x00000A81, 0x00000A83},
+{0x00000ABC, 0x00000ABC}, {0x00000ABE, 0x00000AC5}, {0x00000AC7, 0x00000AC9}, {0x00000ACB, 0x00000ACD},
+{0x00000AE2, 0x00000AE3}, {0x00000AFA, 0x00000AFF}, {0x00000B01, 0x00000B03}, {0x00000B3C, 0x00000B3C},
+{0x00000B3E, 0x00000B44}, {0x00000B47, 0x00000B48}, {0x00000B4B, 0x00000B4D}, {0x00000B55, 0x00000B57},
+{0x00000B62, 0x00000B63}, {0x00000B82, 0x00000B82}, {0x00000BBE, 0x00000BC2}, {0x00000BC6, 0x00000BC8},
+{0x00000BCA, 0x00000BCD}, {0x00000BD7, 0x00000BD7}, {0x00000C00, 0x00000C04}, {0x00000C3C, 0x00000C3C},
+{0x00000C3E, 0x00000C44}, {0x00000C46, 0x00000C48}, {0x00000C4A, 0x00000C4D}, {0x00000C55, 0x00000C56},
+{0x00000C62, 0x00000C63}, {0x00000C81, 0x00000C83}, {0x00000CBC, 0x00000CBC}, {0x00000CBE, 0x00000CC4},
+{0x00000CC6, 0x00000CC8}, {0x00000CCA, 0x00000CCD}, {0x00000CD5, 0x00000CD6}, {0x00000CE2, 0x00000CE3},
+{0x00000CF3, 0x00000CF3}, {0x00000D00, 0x00000D03}, {0x00000D3B, 0x00000D3C}, {0x00000D3E, 0x00000D44},
+{0x00000D46, 0x00000D48}, {0x00000D4A, 0x00000D4D}, {0x00000D57, 0x00000D57}, {0x00000D62, 0x00000D63},
+{0x00000D81, 0x00000D83}, {0x00000DCA, 0x00000DCA}, {0x00000DCF, 0x00000DD4}, {0x00000DD6, 0x00000DD6},
+{0x00000DD8, 0x00000DDF}, {0x00000DF2, 0x00000DF3}, {0x00000E31, 0x00000E31}, {0x00000E34, 0x00000E3A},
+{0x00000E47, 0x00000E4E}, {0x00000EB1, 0x00000EB1}, {0x00000EB4, 0x00000EBC}, {0x00000EC8, 0x00000ECE},
+{0x00000F18, 0x00000F19}, {0x00000F35, 0x00000F35}, {0x00000F37, 0x00000F37}, {0x00000F39, 0x00000F39},
+{0x00000F3E, 0x00000F3F}, {0x00000F71, 0x00000F84}, {0x00000F86, 0x00000F87}, {0x00000F8D, 0x00000F97},
+{0x00000F99, 0x00000FBC}, {0x00000FC6, 0x00000FC6}, {0x0000102B, 0x0000103E}, {0x00001056, 0x00001059},
+{0x0000105E, 0x00001060}, {0x00001062, 0x00001064}, {0x00001067, 0x0000106D}, {0x00001071, 0x00001074},
+{0x00001082, 0x0000108D}, {0x0000108F, 0x0000108F}, {0x0000109A, 0x0000109D}, {0x0000135D, 0x0000135F},
+{0x00001712, 0x00001715}, {0x00001732, 0x00001734}, {0x00001752, 0x00001753}, {0x00001772, 0x00001773},
+{0x000017B4, 0x000017D3}, {0x000017DD, 0x000017DD}, {0x0000180B, 0x0000180D}, {0x0000180F, 0x0000180F},
{0x00001885, 0x00001886}, {0x000018A9, 0x000018A9}, {0x00001920, 0x0000192B}, {0x00001930, 0x0000193B},
{0x00001A17, 0x00001A1B}, {0x00001A55, 0x00001A5E}, {0x00001A60, 0x00001A7C}, {0x00001A7F, 0x00001A7F},
-{0x00001AB0, 0x00001AC0}, {0x00001B00, 0x00001B04}, {0x00001B34, 0x00001B44}, {0x00001B6B, 0x00001B73},
+{0x00001AB0, 0x00001ACE}, {0x00001B00, 0x00001B04}, {0x00001B34, 0x00001B44}, {0x00001B6B, 0x00001B73},
{0x00001B80, 0x00001B82}, {0x00001BA1, 0x00001BAD}, {0x00001BE6, 0x00001BF3}, {0x00001C24, 0x00001C37},
{0x00001CD0, 0x00001CD2}, {0x00001CD4, 0x00001CE8}, {0x00001CED, 0x00001CED}, {0x00001CF4, 0x00001CF4},
-{0x00001CF7, 0x00001CF9}, {0x00001DC0, 0x00001DF9}, {0x00001DFB, 0x00001DFF}, {0x000020D0, 0x000020F0},
-{0x00002CEF, 0x00002CF1}, {0x00002D7F, 0x00002D7F}, {0x00002DE0, 0x00002DFF}, {0x0000302A, 0x0000302F},
-{0x00003099, 0x0000309A}, {0x0000A66F, 0x0000A672}, {0x0000A674, 0x0000A67D}, {0x0000A69E, 0x0000A69F},
-{0x0000A6F0, 0x0000A6F1}, {0x0000A802, 0x0000A802}, {0x0000A806, 0x0000A806}, {0x0000A80B, 0x0000A80B},
-{0x0000A823, 0x0000A827}, {0x0000A82C, 0x0000A82C}, {0x0000A880, 0x0000A881}, {0x0000A8B4, 0x0000A8C5},
-{0x0000A8E0, 0x0000A8F1}, {0x0000A8FF, 0x0000A8FF}, {0x0000A926, 0x0000A92D}, {0x0000A947, 0x0000A953},
-{0x0000A980, 0x0000A983}, {0x0000A9B3, 0x0000A9C0}, {0x0000A9E5, 0x0000A9E5}, {0x0000AA29, 0x0000AA36},
-{0x0000AA43, 0x0000AA43}, {0x0000AA4C, 0x0000AA4D}, {0x0000AA7B, 0x0000AA7D}, {0x0000AAB0, 0x0000AAB0},
-{0x0000AAB2, 0x0000AAB4}, {0x0000AAB7, 0x0000AAB8}, {0x0000AABE, 0x0000AABF}, {0x0000AAC1, 0x0000AAC1},
-{0x0000AAEB, 0x0000AAEF}, {0x0000AAF5, 0x0000AAF6}, {0x0000ABE3, 0x0000ABEA}, {0x0000ABEC, 0x0000ABED},
-{0x0000FB1E, 0x0000FB1E}, {0x0000FE00, 0x0000FE0F}, {0x0000FE20, 0x0000FE2F}, {0x000101FD, 0x000101FD},
-{0x000102E0, 0x000102E0}, {0x00010376, 0x0001037A}, {0x00010A01, 0x00010A03}, {0x00010A05, 0x00010A06},
-{0x00010A0C, 0x00010A0F}, {0x00010A38, 0x00010A3A}, {0x00010A3F, 0x00010A3F}, {0x00010AE5, 0x00010AE6},
-{0x00010D24, 0x00010D27}, {0x00010EAB, 0x00010EAC}, {0x00010F46, 0x00010F50}, {0x00011000, 0x00011002},
-{0x00011038, 0x00011046}, {0x0001107F, 0x00011082}, {0x000110B0, 0x000110BA}, {0x00011100, 0x00011102},
+{0x00001CF7, 0x00001CF9}, {0x00001DC0, 0x00001DFF}, {0x000020D0, 0x000020F0}, {0x00002CEF, 0x00002CF1},
+{0x00002D7F, 0x00002D7F}, {0x00002DE0, 0x00002DFF}, {0x0000302A, 0x0000302F}, {0x00003099, 0x0000309A},
+{0x0000A66F, 0x0000A672}, {0x0000A674, 0x0000A67D}, {0x0000A69E, 0x0000A69F}, {0x0000A6F0, 0x0000A6F1},
+{0x0000A802, 0x0000A802}, {0x0000A806, 0x0000A806}, {0x0000A80B, 0x0000A80B}, {0x0000A823, 0x0000A827},
+{0x0000A82C, 0x0000A82C}, {0x0000A880, 0x0000A881}, {0x0000A8B4, 0x0000A8C5}, {0x0000A8E0, 0x0000A8F1},
+{0x0000A8FF, 0x0000A8FF}, {0x0000A926, 0x0000A92D}, {0x0000A947, 0x0000A953}, {0x0000A980, 0x0000A983},
+{0x0000A9B3, 0x0000A9C0}, {0x0000A9E5, 0x0000A9E5}, {0x0000AA29, 0x0000AA36}, {0x0000AA43, 0x0000AA43},
+{0x0000AA4C, 0x0000AA4D}, {0x0000AA7B, 0x0000AA7D}, {0x0000AAB0, 0x0000AAB0}, {0x0000AAB2, 0x0000AAB4},
+{0x0000AAB7, 0x0000AAB8}, {0x0000AABE, 0x0000AABF}, {0x0000AAC1, 0x0000AAC1}, {0x0000AAEB, 0x0000AAEF},
+{0x0000AAF5, 0x0000AAF6}, {0x0000ABE3, 0x0000ABEA}, {0x0000ABEC, 0x0000ABED}, {0x0000FB1E, 0x0000FB1E},
+{0x0000FE00, 0x0000FE0F}, {0x0000FE20, 0x0000FE2F}, {0x000101FD, 0x000101FD}, {0x000102E0, 0x000102E0},
+{0x00010376, 0x0001037A}, {0x00010A01, 0x00010A03}, {0x00010A05, 0x00010A06}, {0x00010A0C, 0x00010A0F},
+{0x00010A38, 0x00010A3A}, {0x00010A3F, 0x00010A3F}, {0x00010AE5, 0x00010AE6}, {0x00010D24, 0x00010D27},
+{0x00010EAB, 0x00010EAC}, {0x00010EFD, 0x00010EFF}, {0x00010F46, 0x00010F50}, {0x00010F82, 0x00010F85},
+{0x00011000, 0x00011002}, {0x00011038, 0x00011046}, {0x00011070, 0x00011070}, {0x00011073, 0x00011074},
+{0x0001107F, 0x00011082}, {0x000110B0, 0x000110BA}, {0x000110C2, 0x000110C2}, {0x00011100, 0x00011102},
{0x00011127, 0x00011134}, {0x00011145, 0x00011146}, {0x00011173, 0x00011173}, {0x00011180, 0x00011182},
{0x000111B3, 0x000111C0}, {0x000111C9, 0x000111CC}, {0x000111CE, 0x000111CF}, {0x0001122C, 0x00011237},
-{0x0001123E, 0x0001123E}, {0x000112DF, 0x000112EA}, {0x00011300, 0x00011303}, {0x0001133B, 0x0001133C},
-{0x0001133E, 0x00011344}, {0x00011347, 0x00011348}, {0x0001134B, 0x0001134D}, {0x00011357, 0x00011357},
-{0x00011362, 0x00011363}, {0x00011366, 0x0001136C}, {0x00011370, 0x00011374}, {0x00011435, 0x00011446},
-{0x0001145E, 0x0001145E}, {0x000114B0, 0x000114C3}, {0x000115AF, 0x000115B5}, {0x000115B8, 0x000115C0},
-{0x000115DC, 0x000115DD}, {0x00011630, 0x00011640}, {0x000116AB, 0x000116B7}, {0x0001171D, 0x0001172B},
-{0x0001182C, 0x0001183A}, {0x00011930, 0x00011935}, {0x00011937, 0x00011938}, {0x0001193B, 0x0001193E},
-{0x00011940, 0x00011940}, {0x00011942, 0x00011943}, {0x000119D1, 0x000119D7}, {0x000119DA, 0x000119E0},
-{0x000119E4, 0x000119E4}, {0x00011A01, 0x00011A0A}, {0x00011A33, 0x00011A39}, {0x00011A3B, 0x00011A3E},
-{0x00011A47, 0x00011A47}, {0x00011A51, 0x00011A5B}, {0x00011A8A, 0x00011A99}, {0x00011C2F, 0x00011C36},
-{0x00011C38, 0x00011C3F}, {0x00011C92, 0x00011CA7}, {0x00011CA9, 0x00011CB6}, {0x00011D31, 0x00011D36},
-{0x00011D3A, 0x00011D3A}, {0x00011D3C, 0x00011D3D}, {0x00011D3F, 0x00011D45}, {0x00011D47, 0x00011D47},
-{0x00011D8A, 0x00011D8E}, {0x00011D90, 0x00011D91}, {0x00011D93, 0x00011D97}, {0x00011EF3, 0x00011EF6},
-{0x00016AF0, 0x00016AF4}, {0x00016B30, 0x00016B36}, {0x00016F4F, 0x00016F4F}, {0x00016F51, 0x00016F87},
-{0x00016F8F, 0x00016F92}, {0x00016FE4, 0x00016FE4}, {0x00016FF0, 0x00016FF1}, {0x0001BC9D, 0x0001BC9E},
-{0x0001D165, 0x0001D169}, {0x0001D16D, 0x0001D172}, {0x0001D17B, 0x0001D182}, {0x0001D185, 0x0001D18B},
-{0x0001D1AA, 0x0001D1AD}, {0x0001D242, 0x0001D244}, {0x0001DA00, 0x0001DA36}, {0x0001DA3B, 0x0001DA6C},
-{0x0001DA75, 0x0001DA75}, {0x0001DA84, 0x0001DA84}, {0x0001DA9B, 0x0001DA9F}, {0x0001DAA1, 0x0001DAAF},
-{0x0001E000, 0x0001E006}, {0x0001E008, 0x0001E018}, {0x0001E01B, 0x0001E021}, {0x0001E023, 0x0001E024},
-{0x0001E026, 0x0001E02A}, {0x0001E130, 0x0001E136}, {0x0001E2EC, 0x0001E2EF}, {0x0001E8D0, 0x0001E8D6},
+{0x0001123E, 0x0001123E}, {0x00011241, 0x00011241}, {0x000112DF, 0x000112EA}, {0x00011300, 0x00011303},
+{0x0001133B, 0x0001133C}, {0x0001133E, 0x00011344}, {0x00011347, 0x00011348}, {0x0001134B, 0x0001134D},
+{0x00011357, 0x00011357}, {0x00011362, 0x00011363}, {0x00011366, 0x0001136C}, {0x00011370, 0x00011374},
+{0x00011435, 0x00011446}, {0x0001145E, 0x0001145E}, {0x000114B0, 0x000114C3}, {0x000115AF, 0x000115B5},
+{0x000115B8, 0x000115C0}, {0x000115DC, 0x000115DD}, {0x00011630, 0x00011640}, {0x000116AB, 0x000116B7},
+{0x0001171D, 0x0001172B}, {0x0001182C, 0x0001183A}, {0x00011930, 0x00011935}, {0x00011937, 0x00011938},
+{0x0001193B, 0x0001193E}, {0x00011940, 0x00011940}, {0x00011942, 0x00011943}, {0x000119D1, 0x000119D7},
+{0x000119DA, 0x000119E0}, {0x000119E4, 0x000119E4}, {0x00011A01, 0x00011A0A}, {0x00011A33, 0x00011A39},
+{0x00011A3B, 0x00011A3E}, {0x00011A47, 0x00011A47}, {0x00011A51, 0x00011A5B}, {0x00011A8A, 0x00011A99},
+{0x00011C2F, 0x00011C36}, {0x00011C38, 0x00011C3F}, {0x00011C92, 0x00011CA7}, {0x00011CA9, 0x00011CB6},
+{0x00011D31, 0x00011D36}, {0x00011D3A, 0x00011D3A}, {0x00011D3C, 0x00011D3D}, {0x00011D3F, 0x00011D45},
+{0x00011D47, 0x00011D47}, {0x00011D8A, 0x00011D8E}, {0x00011D90, 0x00011D91}, {0x00011D93, 0x00011D97},
+{0x00011EF3, 0x00011EF6}, {0x00011F00, 0x00011F01}, {0x00011F03, 0x00011F03}, {0x00011F34, 0x00011F3A},
+{0x00011F3E, 0x00011F42}, {0x00013440, 0x00013440}, {0x00013447, 0x00013455}, {0x00016AF0, 0x00016AF4},
+{0x00016B30, 0x00016B36}, {0x00016F4F, 0x00016F4F}, {0x00016F51, 0x00016F87}, {0x00016F8F, 0x00016F92},
+{0x00016FE4, 0x00016FE4}, {0x00016FF0, 0x00016FF1}, {0x0001BC9D, 0x0001BC9E}, {0x0001CF00, 0x0001CF2D},
+{0x0001CF30, 0x0001CF46}, {0x0001D165, 0x0001D169}, {0x0001D16D, 0x0001D172}, {0x0001D17B, 0x0001D182},
+{0x0001D185, 0x0001D18B}, {0x0001D1AA, 0x0001D1AD}, {0x0001D242, 0x0001D244}, {0x0001DA00, 0x0001DA36},
+{0x0001DA3B, 0x0001DA6C}, {0x0001DA75, 0x0001DA75}, {0x0001DA84, 0x0001DA84}, {0x0001DA9B, 0x0001DA9F},
+{0x0001DAA1, 0x0001DAAF}, {0x0001E000, 0x0001E006}, {0x0001E008, 0x0001E018}, {0x0001E01B, 0x0001E021},
+{0x0001E023, 0x0001E024}, {0x0001E026, 0x0001E02A}, {0x0001E08F, 0x0001E08F}, {0x0001E130, 0x0001E136},
+{0x0001E2AE, 0x0001E2AE}, {0x0001E2EC, 0x0001E2EF}, {0x0001E4EC, 0x0001E4EF}, {0x0001E8D0, 0x0001E8D6},
{0x0001E944, 0x0001E94A}, {0x000E0100, 0x000E01EF},
};
@@ -276,7 +314,7 @@ const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_punctuation = {
{0x000000B6, 0x000000B7}, {0x000000BB, 0x000000BB}, {0x000000BF, 0x000000BF}, {0x0000037E, 0x0000037E},
{0x00000387, 0x00000387}, {0x0000055A, 0x0000055F}, {0x00000589, 0x0000058A}, {0x000005BE, 0x000005BE},
{0x000005C0, 0x000005C0}, {0x000005C3, 0x000005C3}, {0x000005C6, 0x000005C6}, {0x000005F3, 0x000005F4},
-{0x00000609, 0x0000060A}, {0x0000060C, 0x0000060D}, {0x0000061B, 0x0000061B}, {0x0000061E, 0x0000061F},
+{0x00000609, 0x0000060A}, {0x0000060C, 0x0000060D}, {0x0000061B, 0x0000061B}, {0x0000061D, 0x0000061F},
{0x0000066A, 0x0000066D}, {0x000006D4, 0x000006D4}, {0x00000700, 0x0000070D}, {0x000007F7, 0x000007F9},
{0x00000830, 0x0000083E}, {0x0000085E, 0x0000085E}, {0x00000964, 0x00000965}, {0x00000970, 0x00000970},
{0x000009FD, 0x000009FD}, {0x00000A76, 0x00000A76}, {0x00000AF0, 0x00000AF0}, {0x00000C77, 0x00000C77},
@@ -286,37 +324,38 @@ const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_punctuation = {
{0x00001360, 0x00001368}, {0x00001400, 0x00001400}, {0x0000166E, 0x0000166E}, {0x0000169B, 0x0000169C},
{0x000016EB, 0x000016ED}, {0x00001735, 0x00001736}, {0x000017D4, 0x000017D6}, {0x000017D8, 0x000017DA},
{0x00001800, 0x0000180A}, {0x00001944, 0x00001945}, {0x00001A1E, 0x00001A1F}, {0x00001AA0, 0x00001AA6},
-{0x00001AA8, 0x00001AAD}, {0x00001B5A, 0x00001B60}, {0x00001BFC, 0x00001BFF}, {0x00001C3B, 0x00001C3F},
-{0x00001C7E, 0x00001C7F}, {0x00001CC0, 0x00001CC7}, {0x00001CD3, 0x00001CD3}, {0x00002010, 0x00002027},
-{0x00002030, 0x00002043}, {0x00002045, 0x00002051}, {0x00002053, 0x0000205E}, {0x0000207D, 0x0000207E},
-{0x0000208D, 0x0000208E}, {0x00002308, 0x0000230B}, {0x00002329, 0x0000232A}, {0x00002768, 0x00002775},
-{0x000027C5, 0x000027C6}, {0x000027E6, 0x000027EF}, {0x00002983, 0x00002998}, {0x000029D8, 0x000029DB},
-{0x000029FC, 0x000029FD}, {0x00002CF9, 0x00002CFC}, {0x00002CFE, 0x00002CFF}, {0x00002D70, 0x00002D70},
-{0x00002E00, 0x00002E2E}, {0x00002E30, 0x00002E4F}, {0x00002E52, 0x00002E52}, {0x00003001, 0x00003003},
-{0x00003008, 0x00003011}, {0x00003014, 0x0000301F}, {0x00003030, 0x00003030}, {0x0000303D, 0x0000303D},
-{0x000030A0, 0x000030A0}, {0x000030FB, 0x000030FB}, {0x0000A4FE, 0x0000A4FF}, {0x0000A60D, 0x0000A60F},
-{0x0000A673, 0x0000A673}, {0x0000A67E, 0x0000A67E}, {0x0000A6F2, 0x0000A6F7}, {0x0000A874, 0x0000A877},
-{0x0000A8CE, 0x0000A8CF}, {0x0000A8F8, 0x0000A8FA}, {0x0000A8FC, 0x0000A8FC}, {0x0000A92E, 0x0000A92F},
-{0x0000A95F, 0x0000A95F}, {0x0000A9C1, 0x0000A9CD}, {0x0000A9DE, 0x0000A9DF}, {0x0000AA5C, 0x0000AA5F},
-{0x0000AADE, 0x0000AADF}, {0x0000AAF0, 0x0000AAF1}, {0x0000ABEB, 0x0000ABEB}, {0x0000FD3E, 0x0000FD3F},
-{0x0000FE10, 0x0000FE19}, {0x0000FE30, 0x0000FE52}, {0x0000FE54, 0x0000FE61}, {0x0000FE63, 0x0000FE63},
-{0x0000FE68, 0x0000FE68}, {0x0000FE6A, 0x0000FE6B}, {0x0000FF01, 0x0000FF03}, {0x0000FF05, 0x0000FF0A},
-{0x0000FF0C, 0x0000FF0F}, {0x0000FF1A, 0x0000FF1B}, {0x0000FF1F, 0x0000FF20}, {0x0000FF3B, 0x0000FF3D},
-{0x0000FF3F, 0x0000FF3F}, {0x0000FF5B, 0x0000FF5B}, {0x0000FF5D, 0x0000FF5D}, {0x0000FF5F, 0x0000FF65},
-{0x00010100, 0x00010102}, {0x0001039F, 0x0001039F}, {0x000103D0, 0x000103D0}, {0x0001056F, 0x0001056F},
-{0x00010857, 0x00010857}, {0x0001091F, 0x0001091F}, {0x0001093F, 0x0001093F}, {0x00010A50, 0x00010A58},
-{0x00010A7F, 0x00010A7F}, {0x00010AF0, 0x00010AF6}, {0x00010B39, 0x00010B3F}, {0x00010B99, 0x00010B9C},
-{0x00010EAD, 0x00010EAD}, {0x00010F55, 0x00010F59}, {0x00011047, 0x0001104D}, {0x000110BB, 0x000110BC},
-{0x000110BE, 0x000110C1}, {0x00011140, 0x00011143}, {0x00011174, 0x00011175}, {0x000111C5, 0x000111C8},
-{0x000111CD, 0x000111CD}, {0x000111DB, 0x000111DB}, {0x000111DD, 0x000111DF}, {0x00011238, 0x0001123D},
-{0x000112A9, 0x000112A9}, {0x0001144B, 0x0001144F}, {0x0001145A, 0x0001145B}, {0x0001145D, 0x0001145D},
-{0x000114C6, 0x000114C6}, {0x000115C1, 0x000115D7}, {0x00011641, 0x00011643}, {0x00011660, 0x0001166C},
-{0x0001173C, 0x0001173E}, {0x0001183B, 0x0001183B}, {0x00011944, 0x00011946}, {0x000119E2, 0x000119E2},
-{0x00011A3F, 0x00011A46}, {0x00011A9A, 0x00011A9C}, {0x00011A9E, 0x00011AA2}, {0x00011C41, 0x00011C45},
-{0x00011C70, 0x00011C71}, {0x00011EF7, 0x00011EF8}, {0x00011FFF, 0x00011FFF}, {0x00012470, 0x00012474},
-{0x00016A6E, 0x00016A6F}, {0x00016AF5, 0x00016AF5}, {0x00016B37, 0x00016B3B}, {0x00016B44, 0x00016B44},
-{0x00016E97, 0x00016E9A}, {0x00016FE2, 0x00016FE2}, {0x0001BC9F, 0x0001BC9F}, {0x0001DA87, 0x0001DA8B},
-{0x0001E95E, 0x0001E95F},
+{0x00001AA8, 0x00001AAD}, {0x00001B5A, 0x00001B60}, {0x00001B7D, 0x00001B7E}, {0x00001BFC, 0x00001BFF},
+{0x00001C3B, 0x00001C3F}, {0x00001C7E, 0x00001C7F}, {0x00001CC0, 0x00001CC7}, {0x00001CD3, 0x00001CD3},
+{0x00002010, 0x00002027}, {0x00002030, 0x00002043}, {0x00002045, 0x00002051}, {0x00002053, 0x0000205E},
+{0x0000207D, 0x0000207E}, {0x0000208D, 0x0000208E}, {0x00002308, 0x0000230B}, {0x00002329, 0x0000232A},
+{0x00002768, 0x00002775}, {0x000027C5, 0x000027C6}, {0x000027E6, 0x000027EF}, {0x00002983, 0x00002998},
+{0x000029D8, 0x000029DB}, {0x000029FC, 0x000029FD}, {0x00002CF9, 0x00002CFC}, {0x00002CFE, 0x00002CFF},
+{0x00002D70, 0x00002D70}, {0x00002E00, 0x00002E2E}, {0x00002E30, 0x00002E4F}, {0x00002E52, 0x00002E5D},
+{0x00003001, 0x00003003}, {0x00003008, 0x00003011}, {0x00003014, 0x0000301F}, {0x00003030, 0x00003030},
+{0x0000303D, 0x0000303D}, {0x000030A0, 0x000030A0}, {0x000030FB, 0x000030FB}, {0x0000A4FE, 0x0000A4FF},
+{0x0000A60D, 0x0000A60F}, {0x0000A673, 0x0000A673}, {0x0000A67E, 0x0000A67E}, {0x0000A6F2, 0x0000A6F7},
+{0x0000A874, 0x0000A877}, {0x0000A8CE, 0x0000A8CF}, {0x0000A8F8, 0x0000A8FA}, {0x0000A8FC, 0x0000A8FC},
+{0x0000A92E, 0x0000A92F}, {0x0000A95F, 0x0000A95F}, {0x0000A9C1, 0x0000A9CD}, {0x0000A9DE, 0x0000A9DF},
+{0x0000AA5C, 0x0000AA5F}, {0x0000AADE, 0x0000AADF}, {0x0000AAF0, 0x0000AAF1}, {0x0000ABEB, 0x0000ABEB},
+{0x0000FD3E, 0x0000FD3F}, {0x0000FE10, 0x0000FE19}, {0x0000FE30, 0x0000FE52}, {0x0000FE54, 0x0000FE61},
+{0x0000FE63, 0x0000FE63}, {0x0000FE68, 0x0000FE68}, {0x0000FE6A, 0x0000FE6B}, {0x0000FF01, 0x0000FF03},
+{0x0000FF05, 0x0000FF0A}, {0x0000FF0C, 0x0000FF0F}, {0x0000FF1A, 0x0000FF1B}, {0x0000FF1F, 0x0000FF20},
+{0x0000FF3B, 0x0000FF3D}, {0x0000FF3F, 0x0000FF3F}, {0x0000FF5B, 0x0000FF5B}, {0x0000FF5D, 0x0000FF5D},
+{0x0000FF5F, 0x0000FF65}, {0x00010100, 0x00010102}, {0x0001039F, 0x0001039F}, {0x000103D0, 0x000103D0},
+{0x0001056F, 0x0001056F}, {0x00010857, 0x00010857}, {0x0001091F, 0x0001091F}, {0x0001093F, 0x0001093F},
+{0x00010A50, 0x00010A58}, {0x00010A7F, 0x00010A7F}, {0x00010AF0, 0x00010AF6}, {0x00010B39, 0x00010B3F},
+{0x00010B99, 0x00010B9C}, {0x00010EAD, 0x00010EAD}, {0x00010F55, 0x00010F59}, {0x00010F86, 0x00010F89},
+{0x00011047, 0x0001104D}, {0x000110BB, 0x000110BC}, {0x000110BE, 0x000110C1}, {0x00011140, 0x00011143},
+{0x00011174, 0x00011175}, {0x000111C5, 0x000111C8}, {0x000111CD, 0x000111CD}, {0x000111DB, 0x000111DB},
+{0x000111DD, 0x000111DF}, {0x00011238, 0x0001123D}, {0x000112A9, 0x000112A9}, {0x0001144B, 0x0001144F},
+{0x0001145A, 0x0001145B}, {0x0001145D, 0x0001145D}, {0x000114C6, 0x000114C6}, {0x000115C1, 0x000115D7},
+{0x00011641, 0x00011643}, {0x00011660, 0x0001166C}, {0x000116B9, 0x000116B9}, {0x0001173C, 0x0001173E},
+{0x0001183B, 0x0001183B}, {0x00011944, 0x00011946}, {0x000119E2, 0x000119E2}, {0x00011A3F, 0x00011A46},
+{0x00011A9A, 0x00011A9C}, {0x00011A9E, 0x00011AA2}, {0x00011B00, 0x00011B09}, {0x00011C41, 0x00011C45},
+{0x00011C70, 0x00011C71}, {0x00011EF7, 0x00011EF8}, {0x00011F43, 0x00011F4F}, {0x00011FFF, 0x00011FFF},
+{0x00012470, 0x00012474}, {0x00012FF1, 0x00012FF2}, {0x00016A6E, 0x00016A6F}, {0x00016AF5, 0x00016AF5},
+{0x00016B37, 0x00016B3B}, {0x00016B44, 0x00016B44}, {0x00016E97, 0x00016E9A}, {0x00016FE2, 0x00016FE2},
+{0x0001BC9F, 0x0001BC9F}, {0x0001DA87, 0x0001DA8B}, {0x0001E95E, 0x0001E95F},
};
const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_symbol = {
@@ -328,170 +367,172 @@ const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_symbol = {
{0x00000375, 0x00000375}, {0x00000384, 0x00000385}, {0x000003F6, 0x000003F6}, {0x00000482, 0x00000482},
{0x0000058D, 0x0000058F}, {0x00000606, 0x00000608}, {0x0000060B, 0x0000060B}, {0x0000060E, 0x0000060F},
{0x000006DE, 0x000006DE}, {0x000006E9, 0x000006E9}, {0x000006FD, 0x000006FE}, {0x000007F6, 0x000007F6},
-{0x000007FE, 0x000007FF}, {0x000009F2, 0x000009F3}, {0x000009FA, 0x000009FB}, {0x00000AF1, 0x00000AF1},
-{0x00000B70, 0x00000B70}, {0x00000BF3, 0x00000BFA}, {0x00000C7F, 0x00000C7F}, {0x00000D4F, 0x00000D4F},
-{0x00000D79, 0x00000D79}, {0x00000E3F, 0x00000E3F}, {0x00000F01, 0x00000F03}, {0x00000F13, 0x00000F13},
-{0x00000F15, 0x00000F17}, {0x00000F1A, 0x00000F1F}, {0x00000F34, 0x00000F34}, {0x00000F36, 0x00000F36},
-{0x00000F38, 0x00000F38}, {0x00000FBE, 0x00000FC5}, {0x00000FC7, 0x00000FCC}, {0x00000FCE, 0x00000FCF},
-{0x00000FD5, 0x00000FD8}, {0x0000109E, 0x0000109F}, {0x00001390, 0x00001399}, {0x0000166D, 0x0000166D},
-{0x000017DB, 0x000017DB}, {0x00001940, 0x00001940}, {0x000019DE, 0x000019FF}, {0x00001B61, 0x00001B6A},
-{0x00001B74, 0x00001B7C}, {0x00001FBD, 0x00001FBD}, {0x00001FBF, 0x00001FC1}, {0x00001FCD, 0x00001FCF},
-{0x00001FDD, 0x00001FDF}, {0x00001FED, 0x00001FEF}, {0x00001FFD, 0x00001FFE}, {0x00002044, 0x00002044},
-{0x00002052, 0x00002052}, {0x0000207A, 0x0000207C}, {0x0000208A, 0x0000208C}, {0x000020A0, 0x000020BF},
-{0x00002100, 0x00002101}, {0x00002103, 0x00002106}, {0x00002108, 0x00002109}, {0x00002114, 0x00002114},
-{0x00002116, 0x00002118}, {0x0000211E, 0x00002123}, {0x00002125, 0x00002125}, {0x00002127, 0x00002127},
-{0x00002129, 0x00002129}, {0x0000212E, 0x0000212E}, {0x0000213A, 0x0000213B}, {0x00002140, 0x00002144},
-{0x0000214A, 0x0000214D}, {0x0000214F, 0x0000214F}, {0x0000218A, 0x0000218B}, {0x00002190, 0x00002307},
-{0x0000230C, 0x00002328}, {0x0000232B, 0x00002426}, {0x00002440, 0x0000244A}, {0x0000249C, 0x000024E9},
-{0x00002500, 0x00002767}, {0x00002794, 0x000027C4}, {0x000027C7, 0x000027E5}, {0x000027F0, 0x00002982},
-{0x00002999, 0x000029D7}, {0x000029DC, 0x000029FB}, {0x000029FE, 0x00002B73}, {0x00002B76, 0x00002B95},
-{0x00002B97, 0x00002BFF}, {0x00002CE5, 0x00002CEA}, {0x00002E50, 0x00002E51}, {0x00002E80, 0x00002E99},
-{0x00002E9B, 0x00002EF3}, {0x00002F00, 0x00002FD5}, {0x00002FF0, 0x00002FFB}, {0x00003004, 0x00003004},
-{0x00003012, 0x00003013}, {0x00003020, 0x00003020}, {0x00003036, 0x00003037}, {0x0000303E, 0x0000303F},
-{0x0000309B, 0x0000309C}, {0x00003190, 0x00003191}, {0x00003196, 0x0000319F}, {0x000031C0, 0x000031E3},
-{0x00003200, 0x0000321E}, {0x0000322A, 0x00003247}, {0x00003250, 0x00003250}, {0x00003260, 0x0000327F},
-{0x0000328A, 0x000032B0}, {0x000032C0, 0x000033FF}, {0x00004DC0, 0x00004DFF}, {0x0000A490, 0x0000A4C6},
-{0x0000A700, 0x0000A716}, {0x0000A720, 0x0000A721}, {0x0000A789, 0x0000A78A}, {0x0000A828, 0x0000A82B},
-{0x0000A836, 0x0000A839}, {0x0000AA77, 0x0000AA79}, {0x0000AB5B, 0x0000AB5B}, {0x0000AB6A, 0x0000AB6B},
-{0x0000FB29, 0x0000FB29}, {0x0000FBB2, 0x0000FBC1}, {0x0000FDFC, 0x0000FDFD}, {0x0000FE62, 0x0000FE62},
+{0x000007FE, 0x000007FF}, {0x00000888, 0x00000888}, {0x000009F2, 0x000009F3}, {0x000009FA, 0x000009FB},
+{0x00000AF1, 0x00000AF1}, {0x00000B70, 0x00000B70}, {0x00000BF3, 0x00000BFA}, {0x00000C7F, 0x00000C7F},
+{0x00000D4F, 0x00000D4F}, {0x00000D79, 0x00000D79}, {0x00000E3F, 0x00000E3F}, {0x00000F01, 0x00000F03},
+{0x00000F13, 0x00000F13}, {0x00000F15, 0x00000F17}, {0x00000F1A, 0x00000F1F}, {0x00000F34, 0x00000F34},
+{0x00000F36, 0x00000F36}, {0x00000F38, 0x00000F38}, {0x00000FBE, 0x00000FC5}, {0x00000FC7, 0x00000FCC},
+{0x00000FCE, 0x00000FCF}, {0x00000FD5, 0x00000FD8}, {0x0000109E, 0x0000109F}, {0x00001390, 0x00001399},
+{0x0000166D, 0x0000166D}, {0x000017DB, 0x000017DB}, {0x00001940, 0x00001940}, {0x000019DE, 0x000019FF},
+{0x00001B61, 0x00001B6A}, {0x00001B74, 0x00001B7C}, {0x00001FBD, 0x00001FBD}, {0x00001FBF, 0x00001FC1},
+{0x00001FCD, 0x00001FCF}, {0x00001FDD, 0x00001FDF}, {0x00001FED, 0x00001FEF}, {0x00001FFD, 0x00001FFE},
+{0x00002044, 0x00002044}, {0x00002052, 0x00002052}, {0x0000207A, 0x0000207C}, {0x0000208A, 0x0000208C},
+{0x000020A0, 0x000020C0}, {0x00002100, 0x00002101}, {0x00002103, 0x00002106}, {0x00002108, 0x00002109},
+{0x00002114, 0x00002114}, {0x00002116, 0x00002118}, {0x0000211E, 0x00002123}, {0x00002125, 0x00002125},
+{0x00002127, 0x00002127}, {0x00002129, 0x00002129}, {0x0000212E, 0x0000212E}, {0x0000213A, 0x0000213B},
+{0x00002140, 0x00002144}, {0x0000214A, 0x0000214D}, {0x0000214F, 0x0000214F}, {0x0000218A, 0x0000218B},
+{0x00002190, 0x00002307}, {0x0000230C, 0x00002328}, {0x0000232B, 0x00002426}, {0x00002440, 0x0000244A},
+{0x0000249C, 0x000024E9}, {0x00002500, 0x00002767}, {0x00002794, 0x000027C4}, {0x000027C7, 0x000027E5},
+{0x000027F0, 0x00002982}, {0x00002999, 0x000029D7}, {0x000029DC, 0x000029FB}, {0x000029FE, 0x00002B73},
+{0x00002B76, 0x00002B95}, {0x00002B97, 0x00002BFF}, {0x00002CE5, 0x00002CEA}, {0x00002E50, 0x00002E51},
+{0x00002E80, 0x00002E99}, {0x00002E9B, 0x00002EF3}, {0x00002F00, 0x00002FD5}, {0x00002FF0, 0x00002FFF},
+{0x00003004, 0x00003004}, {0x00003012, 0x00003013}, {0x00003020, 0x00003020}, {0x00003036, 0x00003037},
+{0x0000303E, 0x0000303F}, {0x0000309B, 0x0000309C}, {0x00003190, 0x00003191}, {0x00003196, 0x0000319F},
+{0x000031C0, 0x000031E3}, {0x000031EF, 0x000031EF}, {0x00003200, 0x0000321E}, {0x0000322A, 0x00003247},
+{0x00003250, 0x00003250}, {0x00003260, 0x0000327F}, {0x0000328A, 0x000032B0}, {0x000032C0, 0x000033FF},
+{0x00004DC0, 0x00004DFF}, {0x0000A490, 0x0000A4C6}, {0x0000A700, 0x0000A716}, {0x0000A720, 0x0000A721},
+{0x0000A789, 0x0000A78A}, {0x0000A828, 0x0000A82B}, {0x0000A836, 0x0000A839}, {0x0000AA77, 0x0000AA79},
+{0x0000AB5B, 0x0000AB5B}, {0x0000AB6A, 0x0000AB6B}, {0x0000FB29, 0x0000FB29}, {0x0000FBB2, 0x0000FBC2},
+{0x0000FD40, 0x0000FD4F}, {0x0000FDCF, 0x0000FDCF}, {0x0000FDFC, 0x0000FDFF}, {0x0000FE62, 0x0000FE62},
{0x0000FE64, 0x0000FE66}, {0x0000FE69, 0x0000FE69}, {0x0000FF04, 0x0000FF04}, {0x0000FF0B, 0x0000FF0B},
{0x0000FF1C, 0x0000FF1E}, {0x0000FF3E, 0x0000FF3E}, {0x0000FF40, 0x0000FF40}, {0x0000FF5C, 0x0000FF5C},
{0x0000FF5E, 0x0000FF5E}, {0x0000FFE0, 0x0000FFE6}, {0x0000FFE8, 0x0000FFEE}, {0x0000FFFC, 0x0000FFFD},
{0x00010137, 0x0001013F}, {0x00010179, 0x00010189}, {0x0001018C, 0x0001018E}, {0x00010190, 0x0001019C},
{0x000101A0, 0x000101A0}, {0x000101D0, 0x000101FC}, {0x00010877, 0x00010878}, {0x00010AC8, 0x00010AC8},
{0x0001173F, 0x0001173F}, {0x00011FD5, 0x00011FF1}, {0x00016B3C, 0x00016B3F}, {0x00016B45, 0x00016B45},
-{0x0001BC9C, 0x0001BC9C}, {0x0001D000, 0x0001D0F5}, {0x0001D100, 0x0001D126}, {0x0001D129, 0x0001D164},
-{0x0001D16A, 0x0001D16C}, {0x0001D183, 0x0001D184}, {0x0001D18C, 0x0001D1A9}, {0x0001D1AE, 0x0001D1E8},
-{0x0001D200, 0x0001D241}, {0x0001D245, 0x0001D245}, {0x0001D300, 0x0001D356}, {0x0001D6C1, 0x0001D6C1},
-{0x0001D6DB, 0x0001D6DB}, {0x0001D6FB, 0x0001D6FB}, {0x0001D715, 0x0001D715}, {0x0001D735, 0x0001D735},
-{0x0001D74F, 0x0001D74F}, {0x0001D76F, 0x0001D76F}, {0x0001D789, 0x0001D789}, {0x0001D7A9, 0x0001D7A9},
-{0x0001D7C3, 0x0001D7C3}, {0x0001D800, 0x0001D9FF}, {0x0001DA37, 0x0001DA3A}, {0x0001DA6D, 0x0001DA74},
-{0x0001DA76, 0x0001DA83}, {0x0001DA85, 0x0001DA86}, {0x0001E14F, 0x0001E14F}, {0x0001E2FF, 0x0001E2FF},
-{0x0001ECAC, 0x0001ECAC}, {0x0001ECB0, 0x0001ECB0}, {0x0001ED2E, 0x0001ED2E}, {0x0001EEF0, 0x0001EEF1},
-{0x0001F000, 0x0001F02B}, {0x0001F030, 0x0001F093}, {0x0001F0A0, 0x0001F0AE}, {0x0001F0B1, 0x0001F0BF},
-{0x0001F0C1, 0x0001F0CF}, {0x0001F0D1, 0x0001F0F5}, {0x0001F10D, 0x0001F1AD}, {0x0001F1E6, 0x0001F202},
-{0x0001F210, 0x0001F23B}, {0x0001F240, 0x0001F248}, {0x0001F250, 0x0001F251}, {0x0001F260, 0x0001F265},
-{0x0001F300, 0x0001F6D7}, {0x0001F6E0, 0x0001F6EC}, {0x0001F6F0, 0x0001F6FC}, {0x0001F700, 0x0001F773},
-{0x0001F780, 0x0001F7D8}, {0x0001F7E0, 0x0001F7EB}, {0x0001F800, 0x0001F80B}, {0x0001F810, 0x0001F847},
-{0x0001F850, 0x0001F859}, {0x0001F860, 0x0001F887}, {0x0001F890, 0x0001F8AD}, {0x0001F8B0, 0x0001F8B1},
-{0x0001F900, 0x0001F978}, {0x0001F97A, 0x0001F9CB}, {0x0001F9CD, 0x0001FA53}, {0x0001FA60, 0x0001FA6D},
-{0x0001FA70, 0x0001FA74}, {0x0001FA78, 0x0001FA7A}, {0x0001FA80, 0x0001FA86}, {0x0001FA90, 0x0001FAA8},
-{0x0001FAB0, 0x0001FAB6}, {0x0001FAC0, 0x0001FAC2}, {0x0001FAD0, 0x0001FAD6}, {0x0001FB00, 0x0001FB92},
+{0x0001BC9C, 0x0001BC9C}, {0x0001CF50, 0x0001CFC3}, {0x0001D000, 0x0001D0F5}, {0x0001D100, 0x0001D126},
+{0x0001D129, 0x0001D164}, {0x0001D16A, 0x0001D16C}, {0x0001D183, 0x0001D184}, {0x0001D18C, 0x0001D1A9},
+{0x0001D1AE, 0x0001D1EA}, {0x0001D200, 0x0001D241}, {0x0001D245, 0x0001D245}, {0x0001D300, 0x0001D356},
+{0x0001D6C1, 0x0001D6C1}, {0x0001D6DB, 0x0001D6DB}, {0x0001D6FB, 0x0001D6FB}, {0x0001D715, 0x0001D715},
+{0x0001D735, 0x0001D735}, {0x0001D74F, 0x0001D74F}, {0x0001D76F, 0x0001D76F}, {0x0001D789, 0x0001D789},
+{0x0001D7A9, 0x0001D7A9}, {0x0001D7C3, 0x0001D7C3}, {0x0001D800, 0x0001D9FF}, {0x0001DA37, 0x0001DA3A},
+{0x0001DA6D, 0x0001DA74}, {0x0001DA76, 0x0001DA83}, {0x0001DA85, 0x0001DA86}, {0x0001E14F, 0x0001E14F},
+{0x0001E2FF, 0x0001E2FF}, {0x0001ECAC, 0x0001ECAC}, {0x0001ECB0, 0x0001ECB0}, {0x0001ED2E, 0x0001ED2E},
+{0x0001EEF0, 0x0001EEF1}, {0x0001F000, 0x0001F02B}, {0x0001F030, 0x0001F093}, {0x0001F0A0, 0x0001F0AE},
+{0x0001F0B1, 0x0001F0BF}, {0x0001F0C1, 0x0001F0CF}, {0x0001F0D1, 0x0001F0F5}, {0x0001F10D, 0x0001F1AD},
+{0x0001F1E6, 0x0001F202}, {0x0001F210, 0x0001F23B}, {0x0001F240, 0x0001F248}, {0x0001F250, 0x0001F251},
+{0x0001F260, 0x0001F265}, {0x0001F300, 0x0001F6D7}, {0x0001F6DC, 0x0001F6EC}, {0x0001F6F0, 0x0001F6FC},
+{0x0001F700, 0x0001F776}, {0x0001F77B, 0x0001F7D9}, {0x0001F7E0, 0x0001F7EB}, {0x0001F7F0, 0x0001F7F0},
+{0x0001F800, 0x0001F80B}, {0x0001F810, 0x0001F847}, {0x0001F850, 0x0001F859}, {0x0001F860, 0x0001F887},
+{0x0001F890, 0x0001F8AD}, {0x0001F8B0, 0x0001F8B1}, {0x0001F900, 0x0001FA53}, {0x0001FA60, 0x0001FA6D},
+{0x0001FA70, 0x0001FA7C}, {0x0001FA80, 0x0001FA88}, {0x0001FA90, 0x0001FABD}, {0x0001FABF, 0x0001FAC5},
+{0x0001FACE, 0x0001FADB}, {0x0001FAE0, 0x0001FAE8}, {0x0001FAF0, 0x0001FAF8}, {0x0001FB00, 0x0001FB92},
{0x0001FB94, 0x0001FBCA},
};
const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_control = {
-{0x00000000, 0x00000008}, {0x0000000E, 0x0000001B}, {0x0000007F, 0x00000084}, {0x00000086, 0x0000009F},
-{0x000000AD, 0x000000AD}, {0x00000378, 0x00000379}, {0x00000380, 0x00000383}, {0x0000038B, 0x0000038B},
-{0x0000038D, 0x0000038D}, {0x000003A2, 0x000003A2}, {0x00000530, 0x00000530}, {0x00000557, 0x00000558},
-{0x0000058B, 0x0000058C}, {0x00000590, 0x00000590}, {0x000005C8, 0x000005CF}, {0x000005EB, 0x000005EE},
-{0x000005F5, 0x00000605}, {0x0000061C, 0x0000061D}, {0x000006DD, 0x000006DD}, {0x0000070E, 0x0000070F},
-{0x0000074B, 0x0000074C}, {0x000007B2, 0x000007BF}, {0x000007FB, 0x000007FC}, {0x0000082E, 0x0000082F},
-{0x0000083F, 0x0000083F}, {0x0000085C, 0x0000085D}, {0x0000085F, 0x0000085F}, {0x0000086B, 0x0000089F},
-{0x000008B5, 0x000008B5}, {0x000008C8, 0x000008D2}, {0x000008E2, 0x000008E2}, {0x00000984, 0x00000984},
-{0x0000098D, 0x0000098E}, {0x00000991, 0x00000992}, {0x000009A9, 0x000009A9}, {0x000009B1, 0x000009B1},
-{0x000009B3, 0x000009B5}, {0x000009BA, 0x000009BB}, {0x000009C5, 0x000009C6}, {0x000009C9, 0x000009CA},
-{0x000009CF, 0x000009D6}, {0x000009D8, 0x000009DB}, {0x000009DE, 0x000009DE}, {0x000009E4, 0x000009E5},
-{0x000009FF, 0x00000A00}, {0x00000A04, 0x00000A04}, {0x00000A0B, 0x00000A0E}, {0x00000A11, 0x00000A12},
-{0x00000A29, 0x00000A29}, {0x00000A31, 0x00000A31}, {0x00000A34, 0x00000A34}, {0x00000A37, 0x00000A37},
-{0x00000A3A, 0x00000A3B}, {0x00000A3D, 0x00000A3D}, {0x00000A43, 0x00000A46}, {0x00000A49, 0x00000A4A},
-{0x00000A4E, 0x00000A50}, {0x00000A52, 0x00000A58}, {0x00000A5D, 0x00000A5D}, {0x00000A5F, 0x00000A65},
-{0x00000A77, 0x00000A80}, {0x00000A84, 0x00000A84}, {0x00000A8E, 0x00000A8E}, {0x00000A92, 0x00000A92},
-{0x00000AA9, 0x00000AA9}, {0x00000AB1, 0x00000AB1}, {0x00000AB4, 0x00000AB4}, {0x00000ABA, 0x00000ABB},
-{0x00000AC6, 0x00000AC6}, {0x00000ACA, 0x00000ACA}, {0x00000ACE, 0x00000ACF}, {0x00000AD1, 0x00000ADF},
-{0x00000AE4, 0x00000AE5}, {0x00000AF2, 0x00000AF8}, {0x00000B00, 0x00000B00}, {0x00000B04, 0x00000B04},
-{0x00000B0D, 0x00000B0E}, {0x00000B11, 0x00000B12}, {0x00000B29, 0x00000B29}, {0x00000B31, 0x00000B31},
-{0x00000B34, 0x00000B34}, {0x00000B3A, 0x00000B3B}, {0x00000B45, 0x00000B46}, {0x00000B49, 0x00000B4A},
-{0x00000B4E, 0x00000B54}, {0x00000B58, 0x00000B5B}, {0x00000B5E, 0x00000B5E}, {0x00000B64, 0x00000B65},
-{0x00000B78, 0x00000B81}, {0x00000B84, 0x00000B84}, {0x00000B8B, 0x00000B8D}, {0x00000B91, 0x00000B91},
-{0x00000B96, 0x00000B98}, {0x00000B9B, 0x00000B9B}, {0x00000B9D, 0x00000B9D}, {0x00000BA0, 0x00000BA2},
-{0x00000BA5, 0x00000BA7}, {0x00000BAB, 0x00000BAD}, {0x00000BBA, 0x00000BBD}, {0x00000BC3, 0x00000BC5},
-{0x00000BC9, 0x00000BC9}, {0x00000BCE, 0x00000BCF}, {0x00000BD1, 0x00000BD6}, {0x00000BD8, 0x00000BE5},
-{0x00000BFB, 0x00000BFF}, {0x00000C0D, 0x00000C0D}, {0x00000C11, 0x00000C11}, {0x00000C29, 0x00000C29},
-{0x00000C3A, 0x00000C3C}, {0x00000C45, 0x00000C45}, {0x00000C49, 0x00000C49}, {0x00000C4E, 0x00000C54},
-{0x00000C57, 0x00000C57}, {0x00000C5B, 0x00000C5F}, {0x00000C64, 0x00000C65}, {0x00000C70, 0x00000C76},
-{0x00000C8D, 0x00000C8D}, {0x00000C91, 0x00000C91}, {0x00000CA9, 0x00000CA9}, {0x00000CB4, 0x00000CB4},
-{0x00000CBA, 0x00000CBB}, {0x00000CC5, 0x00000CC5}, {0x00000CC9, 0x00000CC9}, {0x00000CCE, 0x00000CD4},
-{0x00000CD7, 0x00000CDD}, {0x00000CDF, 0x00000CDF}, {0x00000CE4, 0x00000CE5}, {0x00000CF0, 0x00000CF0},
-{0x00000CF3, 0x00000CFF}, {0x00000D0D, 0x00000D0D}, {0x00000D11, 0x00000D11}, {0x00000D45, 0x00000D45},
-{0x00000D49, 0x00000D49}, {0x00000D50, 0x00000D53}, {0x00000D64, 0x00000D65}, {0x00000D80, 0x00000D80},
-{0x00000D84, 0x00000D84}, {0x00000D97, 0x00000D99}, {0x00000DB2, 0x00000DB2}, {0x00000DBC, 0x00000DBC},
-{0x00000DBE, 0x00000DBF}, {0x00000DC7, 0x00000DC9}, {0x00000DCB, 0x00000DCE}, {0x00000DD5, 0x00000DD5},
-{0x00000DD7, 0x00000DD7}, {0x00000DE0, 0x00000DE5}, {0x00000DF0, 0x00000DF1}, {0x00000DF5, 0x00000E00},
-{0x00000E3B, 0x00000E3E}, {0x00000E5C, 0x00000E80}, {0x00000E83, 0x00000E83}, {0x00000E85, 0x00000E85},
-{0x00000E8B, 0x00000E8B}, {0x00000EA4, 0x00000EA4}, {0x00000EA6, 0x00000EA6}, {0x00000EBE, 0x00000EBF},
-{0x00000EC5, 0x00000EC5}, {0x00000EC7, 0x00000EC7}, {0x00000ECE, 0x00000ECF}, {0x00000EDA, 0x00000EDB},
-{0x00000EE0, 0x00000EFF}, {0x00000F48, 0x00000F48}, {0x00000F6D, 0x00000F70}, {0x00000F98, 0x00000F98},
-{0x00000FBD, 0x00000FBD}, {0x00000FCD, 0x00000FCD}, {0x00000FDB, 0x00000FFF}, {0x000010C6, 0x000010C6},
-{0x000010C8, 0x000010CC}, {0x000010CE, 0x000010CF}, {0x00001249, 0x00001249}, {0x0000124E, 0x0000124F},
-{0x00001257, 0x00001257}, {0x00001259, 0x00001259}, {0x0000125E, 0x0000125F}, {0x00001289, 0x00001289},
-{0x0000128E, 0x0000128F}, {0x000012B1, 0x000012B1}, {0x000012B6, 0x000012B7}, {0x000012BF, 0x000012BF},
-{0x000012C1, 0x000012C1}, {0x000012C6, 0x000012C7}, {0x000012D7, 0x000012D7}, {0x00001311, 0x00001311},
-{0x00001316, 0x00001317}, {0x0000135B, 0x0000135C}, {0x0000137D, 0x0000137F}, {0x0000139A, 0x0000139F},
-{0x000013F6, 0x000013F7}, {0x000013FE, 0x000013FF}, {0x0000169D, 0x0000169F}, {0x000016F9, 0x000016FF},
-{0x0000170D, 0x0000170D}, {0x00001715, 0x0000171F}, {0x00001737, 0x0000173F}, {0x00001754, 0x0000175F},
-{0x0000176D, 0x0000176D}, {0x00001771, 0x00001771}, {0x00001774, 0x0000177F}, {0x000017DE, 0x000017DF},
-{0x000017EA, 0x000017EF}, {0x000017FA, 0x000017FF}, {0x0000180E, 0x0000180F}, {0x0000181A, 0x0000181F},
-{0x00001879, 0x0000187F}, {0x000018AB, 0x000018AF}, {0x000018F6, 0x000018FF}, {0x0000191F, 0x0000191F},
-{0x0000192C, 0x0000192F}, {0x0000193C, 0x0000193F}, {0x00001941, 0x00001943}, {0x0000196E, 0x0000196F},
-{0x00001975, 0x0000197F}, {0x000019AC, 0x000019AF}, {0x000019CA, 0x000019CF}, {0x000019DB, 0x000019DD},
-{0x00001A1C, 0x00001A1D}, {0x00001A5F, 0x00001A5F}, {0x00001A7D, 0x00001A7E}, {0x00001A8A, 0x00001A8F},
-{0x00001A9A, 0x00001A9F}, {0x00001AAE, 0x00001AAF}, {0x00001AC1, 0x00001AFF}, {0x00001B4C, 0x00001B4F},
-{0x00001B7D, 0x00001B7F}, {0x00001BF4, 0x00001BFB}, {0x00001C38, 0x00001C3A}, {0x00001C4A, 0x00001C4C},
-{0x00001C89, 0x00001C8F}, {0x00001CBB, 0x00001CBC}, {0x00001CC8, 0x00001CCF}, {0x00001CFB, 0x00001CFF},
-{0x00001DFA, 0x00001DFA}, {0x00001F16, 0x00001F17}, {0x00001F1E, 0x00001F1F}, {0x00001F46, 0x00001F47},
+{0x00000000, 0x0000001F}, {0x0000007F, 0x0000009F}, {0x000000AD, 0x000000AD}, {0x00000378, 0x00000379},
+{0x00000380, 0x00000383}, {0x0000038B, 0x0000038B}, {0x0000038D, 0x0000038D}, {0x000003A2, 0x000003A2},
+{0x00000530, 0x00000530}, {0x00000557, 0x00000558}, {0x0000058B, 0x0000058C}, {0x00000590, 0x00000590},
+{0x000005C8, 0x000005CF}, {0x000005EB, 0x000005EE}, {0x000005F5, 0x00000605}, {0x0000061C, 0x0000061C},
+{0x000006DD, 0x000006DD}, {0x0000070E, 0x0000070F}, {0x0000074B, 0x0000074C}, {0x000007B2, 0x000007BF},
+{0x000007FB, 0x000007FC}, {0x0000082E, 0x0000082F}, {0x0000083F, 0x0000083F}, {0x0000085C, 0x0000085D},
+{0x0000085F, 0x0000085F}, {0x0000086B, 0x0000086F}, {0x0000088F, 0x00000897}, {0x000008E2, 0x000008E2},
+{0x00000984, 0x00000984}, {0x0000098D, 0x0000098E}, {0x00000991, 0x00000992}, {0x000009A9, 0x000009A9},
+{0x000009B1, 0x000009B1}, {0x000009B3, 0x000009B5}, {0x000009BA, 0x000009BB}, {0x000009C5, 0x000009C6},
+{0x000009C9, 0x000009CA}, {0x000009CF, 0x000009D6}, {0x000009D8, 0x000009DB}, {0x000009DE, 0x000009DE},
+{0x000009E4, 0x000009E5}, {0x000009FF, 0x00000A00}, {0x00000A04, 0x00000A04}, {0x00000A0B, 0x00000A0E},
+{0x00000A11, 0x00000A12}, {0x00000A29, 0x00000A29}, {0x00000A31, 0x00000A31}, {0x00000A34, 0x00000A34},
+{0x00000A37, 0x00000A37}, {0x00000A3A, 0x00000A3B}, {0x00000A3D, 0x00000A3D}, {0x00000A43, 0x00000A46},
+{0x00000A49, 0x00000A4A}, {0x00000A4E, 0x00000A50}, {0x00000A52, 0x00000A58}, {0x00000A5D, 0x00000A5D},
+{0x00000A5F, 0x00000A65}, {0x00000A77, 0x00000A80}, {0x00000A84, 0x00000A84}, {0x00000A8E, 0x00000A8E},
+{0x00000A92, 0x00000A92}, {0x00000AA9, 0x00000AA9}, {0x00000AB1, 0x00000AB1}, {0x00000AB4, 0x00000AB4},
+{0x00000ABA, 0x00000ABB}, {0x00000AC6, 0x00000AC6}, {0x00000ACA, 0x00000ACA}, {0x00000ACE, 0x00000ACF},
+{0x00000AD1, 0x00000ADF}, {0x00000AE4, 0x00000AE5}, {0x00000AF2, 0x00000AF8}, {0x00000B00, 0x00000B00},
+{0x00000B04, 0x00000B04}, {0x00000B0D, 0x00000B0E}, {0x00000B11, 0x00000B12}, {0x00000B29, 0x00000B29},
+{0x00000B31, 0x00000B31}, {0x00000B34, 0x00000B34}, {0x00000B3A, 0x00000B3B}, {0x00000B45, 0x00000B46},
+{0x00000B49, 0x00000B4A}, {0x00000B4E, 0x00000B54}, {0x00000B58, 0x00000B5B}, {0x00000B5E, 0x00000B5E},
+{0x00000B64, 0x00000B65}, {0x00000B78, 0x00000B81}, {0x00000B84, 0x00000B84}, {0x00000B8B, 0x00000B8D},
+{0x00000B91, 0x00000B91}, {0x00000B96, 0x00000B98}, {0x00000B9B, 0x00000B9B}, {0x00000B9D, 0x00000B9D},
+{0x00000BA0, 0x00000BA2}, {0x00000BA5, 0x00000BA7}, {0x00000BAB, 0x00000BAD}, {0x00000BBA, 0x00000BBD},
+{0x00000BC3, 0x00000BC5}, {0x00000BC9, 0x00000BC9}, {0x00000BCE, 0x00000BCF}, {0x00000BD1, 0x00000BD6},
+{0x00000BD8, 0x00000BE5}, {0x00000BFB, 0x00000BFF}, {0x00000C0D, 0x00000C0D}, {0x00000C11, 0x00000C11},
+{0x00000C29, 0x00000C29}, {0x00000C3A, 0x00000C3B}, {0x00000C45, 0x00000C45}, {0x00000C49, 0x00000C49},
+{0x00000C4E, 0x00000C54}, {0x00000C57, 0x00000C57}, {0x00000C5B, 0x00000C5C}, {0x00000C5E, 0x00000C5F},
+{0x00000C64, 0x00000C65}, {0x00000C70, 0x00000C76}, {0x00000C8D, 0x00000C8D}, {0x00000C91, 0x00000C91},
+{0x00000CA9, 0x00000CA9}, {0x00000CB4, 0x00000CB4}, {0x00000CBA, 0x00000CBB}, {0x00000CC5, 0x00000CC5},
+{0x00000CC9, 0x00000CC9}, {0x00000CCE, 0x00000CD4}, {0x00000CD7, 0x00000CDC}, {0x00000CDF, 0x00000CDF},
+{0x00000CE4, 0x00000CE5}, {0x00000CF0, 0x00000CF0}, {0x00000CF4, 0x00000CFF}, {0x00000D0D, 0x00000D0D},
+{0x00000D11, 0x00000D11}, {0x00000D45, 0x00000D45}, {0x00000D49, 0x00000D49}, {0x00000D50, 0x00000D53},
+{0x00000D64, 0x00000D65}, {0x00000D80, 0x00000D80}, {0x00000D84, 0x00000D84}, {0x00000D97, 0x00000D99},
+{0x00000DB2, 0x00000DB2}, {0x00000DBC, 0x00000DBC}, {0x00000DBE, 0x00000DBF}, {0x00000DC7, 0x00000DC9},
+{0x00000DCB, 0x00000DCE}, {0x00000DD5, 0x00000DD5}, {0x00000DD7, 0x00000DD7}, {0x00000DE0, 0x00000DE5},
+{0x00000DF0, 0x00000DF1}, {0x00000DF5, 0x00000E00}, {0x00000E3B, 0x00000E3E}, {0x00000E5C, 0x00000E80},
+{0x00000E83, 0x00000E83}, {0x00000E85, 0x00000E85}, {0x00000E8B, 0x00000E8B}, {0x00000EA4, 0x00000EA4},
+{0x00000EA6, 0x00000EA6}, {0x00000EBE, 0x00000EBF}, {0x00000EC5, 0x00000EC5}, {0x00000EC7, 0x00000EC7},
+{0x00000ECF, 0x00000ECF}, {0x00000EDA, 0x00000EDB}, {0x00000EE0, 0x00000EFF}, {0x00000F48, 0x00000F48},
+{0x00000F6D, 0x00000F70}, {0x00000F98, 0x00000F98}, {0x00000FBD, 0x00000FBD}, {0x00000FCD, 0x00000FCD},
+{0x00000FDB, 0x00000FFF}, {0x000010C6, 0x000010C6}, {0x000010C8, 0x000010CC}, {0x000010CE, 0x000010CF},
+{0x00001249, 0x00001249}, {0x0000124E, 0x0000124F}, {0x00001257, 0x00001257}, {0x00001259, 0x00001259},
+{0x0000125E, 0x0000125F}, {0x00001289, 0x00001289}, {0x0000128E, 0x0000128F}, {0x000012B1, 0x000012B1},
+{0x000012B6, 0x000012B7}, {0x000012BF, 0x000012BF}, {0x000012C1, 0x000012C1}, {0x000012C6, 0x000012C7},
+{0x000012D7, 0x000012D7}, {0x00001311, 0x00001311}, {0x00001316, 0x00001317}, {0x0000135B, 0x0000135C},
+{0x0000137D, 0x0000137F}, {0x0000139A, 0x0000139F}, {0x000013F6, 0x000013F7}, {0x000013FE, 0x000013FF},
+{0x0000169D, 0x0000169F}, {0x000016F9, 0x000016FF}, {0x00001716, 0x0000171E}, {0x00001737, 0x0000173F},
+{0x00001754, 0x0000175F}, {0x0000176D, 0x0000176D}, {0x00001771, 0x00001771}, {0x00001774, 0x0000177F},
+{0x000017DE, 0x000017DF}, {0x000017EA, 0x000017EF}, {0x000017FA, 0x000017FF}, {0x0000180E, 0x0000180E},
+{0x0000181A, 0x0000181F}, {0x00001879, 0x0000187F}, {0x000018AB, 0x000018AF}, {0x000018F6, 0x000018FF},
+{0x0000191F, 0x0000191F}, {0x0000192C, 0x0000192F}, {0x0000193C, 0x0000193F}, {0x00001941, 0x00001943},
+{0x0000196E, 0x0000196F}, {0x00001975, 0x0000197F}, {0x000019AC, 0x000019AF}, {0x000019CA, 0x000019CF},
+{0x000019DB, 0x000019DD}, {0x00001A1C, 0x00001A1D}, {0x00001A5F, 0x00001A5F}, {0x00001A7D, 0x00001A7E},
+{0x00001A8A, 0x00001A8F}, {0x00001A9A, 0x00001A9F}, {0x00001AAE, 0x00001AAF}, {0x00001ACF, 0x00001AFF},
+{0x00001B4D, 0x00001B4F}, {0x00001B7F, 0x00001B7F}, {0x00001BF4, 0x00001BFB}, {0x00001C38, 0x00001C3A},
+{0x00001C4A, 0x00001C4C}, {0x00001C89, 0x00001C8F}, {0x00001CBB, 0x00001CBC}, {0x00001CC8, 0x00001CCF},
+{0x00001CFB, 0x00001CFF}, {0x00001F16, 0x00001F17}, {0x00001F1E, 0x00001F1F}, {0x00001F46, 0x00001F47},
{0x00001F4E, 0x00001F4F}, {0x00001F58, 0x00001F58}, {0x00001F5A, 0x00001F5A}, {0x00001F5C, 0x00001F5C},
{0x00001F5E, 0x00001F5E}, {0x00001F7E, 0x00001F7F}, {0x00001FB5, 0x00001FB5}, {0x00001FC5, 0x00001FC5},
{0x00001FD4, 0x00001FD5}, {0x00001FDC, 0x00001FDC}, {0x00001FF0, 0x00001FF1}, {0x00001FF5, 0x00001FF5},
{0x00001FFF, 0x00001FFF}, {0x0000200B, 0x0000200F}, {0x0000202A, 0x0000202E}, {0x00002060, 0x0000206F},
-{0x00002072, 0x00002073}, {0x0000208F, 0x0000208F}, {0x0000209D, 0x0000209F}, {0x000020C0, 0x000020CF},
+{0x00002072, 0x00002073}, {0x0000208F, 0x0000208F}, {0x0000209D, 0x0000209F}, {0x000020C1, 0x000020CF},
{0x000020F1, 0x000020FF}, {0x0000218C, 0x0000218F}, {0x00002427, 0x0000243F}, {0x0000244B, 0x0000245F},
-{0x00002B74, 0x00002B75}, {0x00002B96, 0x00002B96}, {0x00002C2F, 0x00002C2F}, {0x00002C5F, 0x00002C5F},
-{0x00002CF4, 0x00002CF8}, {0x00002D26, 0x00002D26}, {0x00002D28, 0x00002D2C}, {0x00002D2E, 0x00002D2F},
-{0x00002D68, 0x00002D6E}, {0x00002D71, 0x00002D7E}, {0x00002D97, 0x00002D9F}, {0x00002DA7, 0x00002DA7},
-{0x00002DAF, 0x00002DAF}, {0x00002DB7, 0x00002DB7}, {0x00002DBF, 0x00002DBF}, {0x00002DC7, 0x00002DC7},
-{0x00002DCF, 0x00002DCF}, {0x00002DD7, 0x00002DD7}, {0x00002DDF, 0x00002DDF}, {0x00002E53, 0x00002E7F},
-{0x00002E9A, 0x00002E9A}, {0x00002EF4, 0x00002EFF}, {0x00002FD6, 0x00002FEF}, {0x00002FFC, 0x00002FFF},
-{0x00003040, 0x00003040}, {0x00003097, 0x00003098}, {0x00003100, 0x00003104}, {0x00003130, 0x00003130},
-{0x0000318F, 0x0000318F}, {0x000031E4, 0x000031EF}, {0x0000321F, 0x0000321F}, {0x00009FFD, 0x00009FFF},
+{0x00002B74, 0x00002B75}, {0x00002B96, 0x00002B96}, {0x00002CF4, 0x00002CF8}, {0x00002D26, 0x00002D26},
+{0x00002D28, 0x00002D2C}, {0x00002D2E, 0x00002D2F}, {0x00002D68, 0x00002D6E}, {0x00002D71, 0x00002D7E},
+{0x00002D97, 0x00002D9F}, {0x00002DA7, 0x00002DA7}, {0x00002DAF, 0x00002DAF}, {0x00002DB7, 0x00002DB7},
+{0x00002DBF, 0x00002DBF}, {0x00002DC7, 0x00002DC7}, {0x00002DCF, 0x00002DCF}, {0x00002DD7, 0x00002DD7},
+{0x00002DDF, 0x00002DDF}, {0x00002E5E, 0x00002E7F}, {0x00002E9A, 0x00002E9A}, {0x00002EF4, 0x00002EFF},
+{0x00002FD6, 0x00002FEF}, {0x00003040, 0x00003040}, {0x00003097, 0x00003098}, {0x00003100, 0x00003104},
+{0x00003130, 0x00003130}, {0x0000318F, 0x0000318F}, {0x000031E4, 0x000031EE}, {0x0000321F, 0x0000321F},
{0x0000A48D, 0x0000A48F}, {0x0000A4C7, 0x0000A4CF}, {0x0000A62C, 0x0000A63F}, {0x0000A6F8, 0x0000A6FF},
-{0x0000A7C0, 0x0000A7C1}, {0x0000A7CB, 0x0000A7F4}, {0x0000A82D, 0x0000A82F}, {0x0000A83A, 0x0000A83F},
-{0x0000A878, 0x0000A87F}, {0x0000A8C6, 0x0000A8CD}, {0x0000A8DA, 0x0000A8DF}, {0x0000A954, 0x0000A95E},
-{0x0000A97D, 0x0000A97F}, {0x0000A9CE, 0x0000A9CE}, {0x0000A9DA, 0x0000A9DD}, {0x0000A9FF, 0x0000A9FF},
-{0x0000AA37, 0x0000AA3F}, {0x0000AA4E, 0x0000AA4F}, {0x0000AA5A, 0x0000AA5B}, {0x0000AAC3, 0x0000AADA},
-{0x0000AAF7, 0x0000AB00}, {0x0000AB07, 0x0000AB08}, {0x0000AB0F, 0x0000AB10}, {0x0000AB17, 0x0000AB1F},
-{0x0000AB27, 0x0000AB27}, {0x0000AB2F, 0x0000AB2F}, {0x0000AB6C, 0x0000AB6F}, {0x0000ABEE, 0x0000ABEF},
-{0x0000ABFA, 0x0000ABFF}, {0x0000D7A4, 0x0000D7AF}, {0x0000D7C7, 0x0000D7CA}, {0x0000D7FC, 0x0000F8FF},
-{0x0000FA6E, 0x0000FA6F}, {0x0000FADA, 0x0000FAFF}, {0x0000FB07, 0x0000FB12}, {0x0000FB18, 0x0000FB1C},
-{0x0000FB37, 0x0000FB37}, {0x0000FB3D, 0x0000FB3D}, {0x0000FB3F, 0x0000FB3F}, {0x0000FB42, 0x0000FB42},
-{0x0000FB45, 0x0000FB45}, {0x0000FBC2, 0x0000FBD2}, {0x0000FD40, 0x0000FD4F}, {0x0000FD90, 0x0000FD91},
-{0x0000FDC8, 0x0000FDEF}, {0x0000FDFE, 0x0000FDFF}, {0x0000FE1A, 0x0000FE1F}, {0x0000FE53, 0x0000FE53},
-{0x0000FE67, 0x0000FE67}, {0x0000FE6C, 0x0000FE6F}, {0x0000FE75, 0x0000FE75}, {0x0000FEFD, 0x0000FF00},
-{0x0000FFBF, 0x0000FFC1}, {0x0000FFC8, 0x0000FFC9}, {0x0000FFD0, 0x0000FFD1}, {0x0000FFD8, 0x0000FFD9},
-{0x0000FFDD, 0x0000FFDF}, {0x0000FFE7, 0x0000FFE7}, {0x0000FFEF, 0x0000FFFB}, {0x0000FFFE, 0x0000FFFF},
-{0x0001000C, 0x0001000C}, {0x00010027, 0x00010027}, {0x0001003B, 0x0001003B}, {0x0001003E, 0x0001003E},
-{0x0001004E, 0x0001004F}, {0x0001005E, 0x0001007F}, {0x000100FB, 0x000100FF}, {0x00010103, 0x00010106},
-{0x00010134, 0x00010136}, {0x0001018F, 0x0001018F}, {0x0001019D, 0x0001019F}, {0x000101A1, 0x000101CF},
-{0x000101FE, 0x0001027F}, {0x0001029D, 0x0001029F}, {0x000102D1, 0x000102DF}, {0x000102FC, 0x000102FF},
-{0x00010324, 0x0001032C}, {0x0001034B, 0x0001034F}, {0x0001037B, 0x0001037F}, {0x0001039E, 0x0001039E},
-{0x000103C4, 0x000103C7}, {0x000103D6, 0x000103FF}, {0x0001049E, 0x0001049F}, {0x000104AA, 0x000104AF},
-{0x000104D4, 0x000104D7}, {0x000104FC, 0x000104FF}, {0x00010528, 0x0001052F}, {0x00010564, 0x0001056E},
-{0x00010570, 0x000105FF}, {0x00010737, 0x0001073F}, {0x00010756, 0x0001075F}, {0x00010768, 0x000107FF},
-{0x00010806, 0x00010807}, {0x00010809, 0x00010809}, {0x00010836, 0x00010836}, {0x00010839, 0x0001083B},
-{0x0001083D, 0x0001083E}, {0x00010856, 0x00010856}, {0x0001089F, 0x000108A6}, {0x000108B0, 0x000108DF},
-{0x000108F3, 0x000108F3}, {0x000108F6, 0x000108FA}, {0x0001091C, 0x0001091E}, {0x0001093A, 0x0001093E},
-{0x00010940, 0x0001097F}, {0x000109B8, 0x000109BB}, {0x000109D0, 0x000109D1}, {0x00010A04, 0x00010A04},
-{0x00010A07, 0x00010A0B}, {0x00010A14, 0x00010A14}, {0x00010A18, 0x00010A18}, {0x00010A36, 0x00010A37},
-{0x00010A3B, 0x00010A3E}, {0x00010A49, 0x00010A4F}, {0x00010A59, 0x00010A5F}, {0x00010AA0, 0x00010ABF},
-{0x00010AE7, 0x00010AEA}, {0x00010AF7, 0x00010AFF}, {0x00010B36, 0x00010B38}, {0x00010B56, 0x00010B57},
-{0x00010B73, 0x00010B77}, {0x00010B92, 0x00010B98}, {0x00010B9D, 0x00010BA8}, {0x00010BB0, 0x00010BFF},
-{0x00010C49, 0x00010C7F}, {0x00010CB3, 0x00010CBF}, {0x00010CF3, 0x00010CF9}, {0x00010D28, 0x00010D2F},
-{0x00010D3A, 0x00010E5F}, {0x00010E7F, 0x00010E7F}, {0x00010EAA, 0x00010EAA}, {0x00010EAE, 0x00010EAF},
-{0x00010EB2, 0x00010EFF}, {0x00010F28, 0x00010F2F}, {0x00010F5A, 0x00010FAF}, {0x00010FCC, 0x00010FDF},
-{0x00010FF7, 0x00010FFF}, {0x0001104E, 0x00011051}, {0x00011070, 0x0001107E}, {0x000110BD, 0x000110BD},
-{0x000110C2, 0x000110CF}, {0x000110E9, 0x000110EF}, {0x000110FA, 0x000110FF}, {0x00011135, 0x00011135},
+{0x0000A7CB, 0x0000A7CF}, {0x0000A7D2, 0x0000A7D2}, {0x0000A7D4, 0x0000A7D4}, {0x0000A7DA, 0x0000A7F1},
+{0x0000A82D, 0x0000A82F}, {0x0000A83A, 0x0000A83F}, {0x0000A878, 0x0000A87F}, {0x0000A8C6, 0x0000A8CD},
+{0x0000A8DA, 0x0000A8DF}, {0x0000A954, 0x0000A95E}, {0x0000A97D, 0x0000A97F}, {0x0000A9CE, 0x0000A9CE},
+{0x0000A9DA, 0x0000A9DD}, {0x0000A9FF, 0x0000A9FF}, {0x0000AA37, 0x0000AA3F}, {0x0000AA4E, 0x0000AA4F},
+{0x0000AA5A, 0x0000AA5B}, {0x0000AAC3, 0x0000AADA}, {0x0000AAF7, 0x0000AB00}, {0x0000AB07, 0x0000AB08},
+{0x0000AB0F, 0x0000AB10}, {0x0000AB17, 0x0000AB1F}, {0x0000AB27, 0x0000AB27}, {0x0000AB2F, 0x0000AB2F},
+{0x0000AB6C, 0x0000AB6F}, {0x0000ABEE, 0x0000ABEF}, {0x0000ABFA, 0x0000ABFF}, {0x0000D7A4, 0x0000D7AF},
+{0x0000D7C7, 0x0000D7CA}, {0x0000D7FC, 0x0000F8FF}, {0x0000FA6E, 0x0000FA6F}, {0x0000FADA, 0x0000FAFF},
+{0x0000FB07, 0x0000FB12}, {0x0000FB18, 0x0000FB1C}, {0x0000FB37, 0x0000FB37}, {0x0000FB3D, 0x0000FB3D},
+{0x0000FB3F, 0x0000FB3F}, {0x0000FB42, 0x0000FB42}, {0x0000FB45, 0x0000FB45}, {0x0000FBC3, 0x0000FBD2},
+{0x0000FD90, 0x0000FD91}, {0x0000FDC8, 0x0000FDCE}, {0x0000FDD0, 0x0000FDEF}, {0x0000FE1A, 0x0000FE1F},
+{0x0000FE53, 0x0000FE53}, {0x0000FE67, 0x0000FE67}, {0x0000FE6C, 0x0000FE6F}, {0x0000FE75, 0x0000FE75},
+{0x0000FEFD, 0x0000FF00}, {0x0000FFBF, 0x0000FFC1}, {0x0000FFC8, 0x0000FFC9}, {0x0000FFD0, 0x0000FFD1},
+{0x0000FFD8, 0x0000FFD9}, {0x0000FFDD, 0x0000FFDF}, {0x0000FFE7, 0x0000FFE7}, {0x0000FFEF, 0x0000FFFB},
+{0x0000FFFE, 0x0000FFFF}, {0x0001000C, 0x0001000C}, {0x00010027, 0x00010027}, {0x0001003B, 0x0001003B},
+{0x0001003E, 0x0001003E}, {0x0001004E, 0x0001004F}, {0x0001005E, 0x0001007F}, {0x000100FB, 0x000100FF},
+{0x00010103, 0x00010106}, {0x00010134, 0x00010136}, {0x0001018F, 0x0001018F}, {0x0001019D, 0x0001019F},
+{0x000101A1, 0x000101CF}, {0x000101FE, 0x0001027F}, {0x0001029D, 0x0001029F}, {0x000102D1, 0x000102DF},
+{0x000102FC, 0x000102FF}, {0x00010324, 0x0001032C}, {0x0001034B, 0x0001034F}, {0x0001037B, 0x0001037F},
+{0x0001039E, 0x0001039E}, {0x000103C4, 0x000103C7}, {0x000103D6, 0x000103FF}, {0x0001049E, 0x0001049F},
+{0x000104AA, 0x000104AF}, {0x000104D4, 0x000104D7}, {0x000104FC, 0x000104FF}, {0x00010528, 0x0001052F},
+{0x00010564, 0x0001056E}, {0x0001057B, 0x0001057B}, {0x0001058B, 0x0001058B}, {0x00010593, 0x00010593},
+{0x00010596, 0x00010596}, {0x000105A2, 0x000105A2}, {0x000105B2, 0x000105B2}, {0x000105BA, 0x000105BA},
+{0x000105BD, 0x000105FF}, {0x00010737, 0x0001073F}, {0x00010756, 0x0001075F}, {0x00010768, 0x0001077F},
+{0x00010786, 0x00010786}, {0x000107B1, 0x000107B1}, {0x000107BB, 0x000107FF}, {0x00010806, 0x00010807},
+{0x00010809, 0x00010809}, {0x00010836, 0x00010836}, {0x00010839, 0x0001083B}, {0x0001083D, 0x0001083E},
+{0x00010856, 0x00010856}, {0x0001089F, 0x000108A6}, {0x000108B0, 0x000108DF}, {0x000108F3, 0x000108F3},
+{0x000108F6, 0x000108FA}, {0x0001091C, 0x0001091E}, {0x0001093A, 0x0001093E}, {0x00010940, 0x0001097F},
+{0x000109B8, 0x000109BB}, {0x000109D0, 0x000109D1}, {0x00010A04, 0x00010A04}, {0x00010A07, 0x00010A0B},
+{0x00010A14, 0x00010A14}, {0x00010A18, 0x00010A18}, {0x00010A36, 0x00010A37}, {0x00010A3B, 0x00010A3E},
+{0x00010A49, 0x00010A4F}, {0x00010A59, 0x00010A5F}, {0x00010AA0, 0x00010ABF}, {0x00010AE7, 0x00010AEA},
+{0x00010AF7, 0x00010AFF}, {0x00010B36, 0x00010B38}, {0x00010B56, 0x00010B57}, {0x00010B73, 0x00010B77},
+{0x00010B92, 0x00010B98}, {0x00010B9D, 0x00010BA8}, {0x00010BB0, 0x00010BFF}, {0x00010C49, 0x00010C7F},
+{0x00010CB3, 0x00010CBF}, {0x00010CF3, 0x00010CF9}, {0x00010D28, 0x00010D2F}, {0x00010D3A, 0x00010E5F},
+{0x00010E7F, 0x00010E7F}, {0x00010EAA, 0x00010EAA}, {0x00010EAE, 0x00010EAF}, {0x00010EB2, 0x00010EFC},
+{0x00010F28, 0x00010F2F}, {0x00010F5A, 0x00010F6F}, {0x00010F8A, 0x00010FAF}, {0x00010FCC, 0x00010FDF},
+{0x00010FF7, 0x00010FFF}, {0x0001104E, 0x00011051}, {0x00011076, 0x0001107E}, {0x000110BD, 0x000110BD},
+{0x000110C3, 0x000110CF}, {0x000110E9, 0x000110EF}, {0x000110FA, 0x000110FF}, {0x00011135, 0x00011135},
{0x00011148, 0x0001114F}, {0x00011177, 0x0001117F}, {0x000111E0, 0x000111E0}, {0x000111F5, 0x000111FF},
-{0x00011212, 0x00011212}, {0x0001123F, 0x0001127F}, {0x00011287, 0x00011287}, {0x00011289, 0x00011289},
+{0x00011212, 0x00011212}, {0x00011242, 0x0001127F}, {0x00011287, 0x00011287}, {0x00011289, 0x00011289},
{0x0001128E, 0x0001128E}, {0x0001129E, 0x0001129E}, {0x000112AA, 0x000112AF}, {0x000112EB, 0x000112EF},
{0x000112FA, 0x000112FF}, {0x00011304, 0x00011304}, {0x0001130D, 0x0001130E}, {0x00011311, 0x00011312},
{0x00011329, 0x00011329}, {0x00011331, 0x00011331}, {0x00011334, 0x00011334}, {0x0001133A, 0x0001133A},
@@ -499,59 +540,792 @@ const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_control = {
{0x00011358, 0x0001135C}, {0x00011364, 0x00011365}, {0x0001136D, 0x0001136F}, {0x00011375, 0x000113FF},
{0x0001145C, 0x0001145C}, {0x00011462, 0x0001147F}, {0x000114C8, 0x000114CF}, {0x000114DA, 0x0001157F},
{0x000115B6, 0x000115B7}, {0x000115DE, 0x000115FF}, {0x00011645, 0x0001164F}, {0x0001165A, 0x0001165F},
-{0x0001166D, 0x0001167F}, {0x000116B9, 0x000116BF}, {0x000116CA, 0x000116FF}, {0x0001171B, 0x0001171C},
-{0x0001172C, 0x0001172F}, {0x00011740, 0x000117FF}, {0x0001183C, 0x0001189F}, {0x000118F3, 0x000118FE},
+{0x0001166D, 0x0001167F}, {0x000116BA, 0x000116BF}, {0x000116CA, 0x000116FF}, {0x0001171B, 0x0001171C},
+{0x0001172C, 0x0001172F}, {0x00011747, 0x000117FF}, {0x0001183C, 0x0001189F}, {0x000118F3, 0x000118FE},
{0x00011907, 0x00011908}, {0x0001190A, 0x0001190B}, {0x00011914, 0x00011914}, {0x00011917, 0x00011917},
{0x00011936, 0x00011936}, {0x00011939, 0x0001193A}, {0x00011947, 0x0001194F}, {0x0001195A, 0x0001199F},
{0x000119A8, 0x000119A9}, {0x000119D8, 0x000119D9}, {0x000119E5, 0x000119FF}, {0x00011A48, 0x00011A4F},
-{0x00011AA3, 0x00011ABF}, {0x00011AF9, 0x00011BFF}, {0x00011C09, 0x00011C09}, {0x00011C37, 0x00011C37},
-{0x00011C46, 0x00011C4F}, {0x00011C6D, 0x00011C6F}, {0x00011C90, 0x00011C91}, {0x00011CA8, 0x00011CA8},
-{0x00011CB7, 0x00011CFF}, {0x00011D07, 0x00011D07}, {0x00011D0A, 0x00011D0A}, {0x00011D37, 0x00011D39},
-{0x00011D3B, 0x00011D3B}, {0x00011D3E, 0x00011D3E}, {0x00011D48, 0x00011D4F}, {0x00011D5A, 0x00011D5F},
-{0x00011D66, 0x00011D66}, {0x00011D69, 0x00011D69}, {0x00011D8F, 0x00011D8F}, {0x00011D92, 0x00011D92},
-{0x00011D99, 0x00011D9F}, {0x00011DAA, 0x00011EDF}, {0x00011EF9, 0x00011FAF}, {0x00011FB1, 0x00011FBF},
+{0x00011AA3, 0x00011AAF}, {0x00011AF9, 0x00011AFF}, {0x00011B0A, 0x00011BFF}, {0x00011C09, 0x00011C09},
+{0x00011C37, 0x00011C37}, {0x00011C46, 0x00011C4F}, {0x00011C6D, 0x00011C6F}, {0x00011C90, 0x00011C91},
+{0x00011CA8, 0x00011CA8}, {0x00011CB7, 0x00011CFF}, {0x00011D07, 0x00011D07}, {0x00011D0A, 0x00011D0A},
+{0x00011D37, 0x00011D39}, {0x00011D3B, 0x00011D3B}, {0x00011D3E, 0x00011D3E}, {0x00011D48, 0x00011D4F},
+{0x00011D5A, 0x00011D5F}, {0x00011D66, 0x00011D66}, {0x00011D69, 0x00011D69}, {0x00011D8F, 0x00011D8F},
+{0x00011D92, 0x00011D92}, {0x00011D99, 0x00011D9F}, {0x00011DAA, 0x00011EDF}, {0x00011EF9, 0x00011EFF},
+{0x00011F11, 0x00011F11}, {0x00011F3B, 0x00011F3D}, {0x00011F5A, 0x00011FAF}, {0x00011FB1, 0x00011FBF},
{0x00011FF2, 0x00011FFE}, {0x0001239A, 0x000123FF}, {0x0001246F, 0x0001246F}, {0x00012475, 0x0001247F},
-{0x00012544, 0x00012FFF}, {0x0001342F, 0x000143FF}, {0x00014647, 0x000167FF}, {0x00016A39, 0x00016A3F},
-{0x00016A5F, 0x00016A5F}, {0x00016A6A, 0x00016A6D}, {0x00016A70, 0x00016ACF}, {0x00016AEE, 0x00016AEF},
-{0x00016AF6, 0x00016AFF}, {0x00016B46, 0x00016B4F}, {0x00016B5A, 0x00016B5A}, {0x00016B62, 0x00016B62},
-{0x00016B78, 0x00016B7C}, {0x00016B90, 0x00016E3F}, {0x00016E9B, 0x00016EFF}, {0x00016F4B, 0x00016F4E},
-{0x00016F88, 0x00016F8E}, {0x00016FA0, 0x00016FDF}, {0x00016FE5, 0x00016FEF}, {0x00016FF2, 0x00016FFF},
-{0x000187F8, 0x000187FF}, {0x00018CD6, 0x00018CFF}, {0x00018D09, 0x0001AFFF}, {0x0001B11F, 0x0001B14F},
-{0x0001B153, 0x0001B163}, {0x0001B168, 0x0001B16F}, {0x0001B2FC, 0x0001BBFF}, {0x0001BC6B, 0x0001BC6F},
-{0x0001BC7D, 0x0001BC7F}, {0x0001BC89, 0x0001BC8F}, {0x0001BC9A, 0x0001BC9B}, {0x0001BCA0, 0x0001CFFF},
-{0x0001D0F6, 0x0001D0FF}, {0x0001D127, 0x0001D128}, {0x0001D173, 0x0001D17A}, {0x0001D1E9, 0x0001D1FF},
-{0x0001D246, 0x0001D2DF}, {0x0001D2F4, 0x0001D2FF}, {0x0001D357, 0x0001D35F}, {0x0001D379, 0x0001D3FF},
+{0x00012544, 0x00012F8F}, {0x00012FF3, 0x00012FFF}, {0x00013430, 0x0001343F}, {0x00013456, 0x000143FF},
+{0x00014647, 0x000167FF}, {0x00016A39, 0x00016A3F}, {0x00016A5F, 0x00016A5F}, {0x00016A6A, 0x00016A6D},
+{0x00016ABF, 0x00016ABF}, {0x00016ACA, 0x00016ACF}, {0x00016AEE, 0x00016AEF}, {0x00016AF6, 0x00016AFF},
+{0x00016B46, 0x00016B4F}, {0x00016B5A, 0x00016B5A}, {0x00016B62, 0x00016B62}, {0x00016B78, 0x00016B7C},
+{0x00016B90, 0x00016E3F}, {0x00016E9B, 0x00016EFF}, {0x00016F4B, 0x00016F4E}, {0x00016F88, 0x00016F8E},
+{0x00016FA0, 0x00016FDF}, {0x00016FE5, 0x00016FEF}, {0x00016FF2, 0x00016FFF}, {0x000187F8, 0x000187FF},
+{0x00018CD6, 0x00018CFF}, {0x00018D09, 0x0001AFEF}, {0x0001AFF4, 0x0001AFF4}, {0x0001AFFC, 0x0001AFFC},
+{0x0001AFFF, 0x0001AFFF}, {0x0001B123, 0x0001B131}, {0x0001B133, 0x0001B14F}, {0x0001B153, 0x0001B154},
+{0x0001B156, 0x0001B163}, {0x0001B168, 0x0001B16F}, {0x0001B2FC, 0x0001BBFF}, {0x0001BC6B, 0x0001BC6F},
+{0x0001BC7D, 0x0001BC7F}, {0x0001BC89, 0x0001BC8F}, {0x0001BC9A, 0x0001BC9B}, {0x0001BCA0, 0x0001CEFF},
+{0x0001CF2E, 0x0001CF2F}, {0x0001CF47, 0x0001CF4F}, {0x0001CFC4, 0x0001CFFF}, {0x0001D0F6, 0x0001D0FF},
+{0x0001D127, 0x0001D128}, {0x0001D173, 0x0001D17A}, {0x0001D1EB, 0x0001D1FF}, {0x0001D246, 0x0001D2BF},
+{0x0001D2D4, 0x0001D2DF}, {0x0001D2F4, 0x0001D2FF}, {0x0001D357, 0x0001D35F}, {0x0001D379, 0x0001D3FF},
{0x0001D455, 0x0001D455}, {0x0001D49D, 0x0001D49D}, {0x0001D4A0, 0x0001D4A1}, {0x0001D4A3, 0x0001D4A4},
{0x0001D4A7, 0x0001D4A8}, {0x0001D4AD, 0x0001D4AD}, {0x0001D4BA, 0x0001D4BA}, {0x0001D4BC, 0x0001D4BC},
{0x0001D4C4, 0x0001D4C4}, {0x0001D506, 0x0001D506}, {0x0001D50B, 0x0001D50C}, {0x0001D515, 0x0001D515},
{0x0001D51D, 0x0001D51D}, {0x0001D53A, 0x0001D53A}, {0x0001D53F, 0x0001D53F}, {0x0001D545, 0x0001D545},
{0x0001D547, 0x0001D549}, {0x0001D551, 0x0001D551}, {0x0001D6A6, 0x0001D6A7}, {0x0001D7CC, 0x0001D7CD},
-{0x0001DA8C, 0x0001DA9A}, {0x0001DAA0, 0x0001DAA0}, {0x0001DAB0, 0x0001DFFF}, {0x0001E007, 0x0001E007},
-{0x0001E019, 0x0001E01A}, {0x0001E022, 0x0001E022}, {0x0001E025, 0x0001E025}, {0x0001E02B, 0x0001E0FF},
-{0x0001E12D, 0x0001E12F}, {0x0001E13E, 0x0001E13F}, {0x0001E14A, 0x0001E14D}, {0x0001E150, 0x0001E2BF},
-{0x0001E2FA, 0x0001E2FE}, {0x0001E300, 0x0001E7FF}, {0x0001E8C5, 0x0001E8C6}, {0x0001E8D7, 0x0001E8FF},
-{0x0001E94C, 0x0001E94F}, {0x0001E95A, 0x0001E95D}, {0x0001E960, 0x0001EC70}, {0x0001ECB5, 0x0001ED00},
-{0x0001ED3E, 0x0001EDFF}, {0x0001EE04, 0x0001EE04}, {0x0001EE20, 0x0001EE20}, {0x0001EE23, 0x0001EE23},
-{0x0001EE25, 0x0001EE26}, {0x0001EE28, 0x0001EE28}, {0x0001EE33, 0x0001EE33}, {0x0001EE38, 0x0001EE38},
-{0x0001EE3A, 0x0001EE3A}, {0x0001EE3C, 0x0001EE41}, {0x0001EE43, 0x0001EE46}, {0x0001EE48, 0x0001EE48},
-{0x0001EE4A, 0x0001EE4A}, {0x0001EE4C, 0x0001EE4C}, {0x0001EE50, 0x0001EE50}, {0x0001EE53, 0x0001EE53},
-{0x0001EE55, 0x0001EE56}, {0x0001EE58, 0x0001EE58}, {0x0001EE5A, 0x0001EE5A}, {0x0001EE5C, 0x0001EE5C},
-{0x0001EE5E, 0x0001EE5E}, {0x0001EE60, 0x0001EE60}, {0x0001EE63, 0x0001EE63}, {0x0001EE65, 0x0001EE66},
-{0x0001EE6B, 0x0001EE6B}, {0x0001EE73, 0x0001EE73}, {0x0001EE78, 0x0001EE78}, {0x0001EE7D, 0x0001EE7D},
-{0x0001EE7F, 0x0001EE7F}, {0x0001EE8A, 0x0001EE8A}, {0x0001EE9C, 0x0001EEA0}, {0x0001EEA4, 0x0001EEA4},
-{0x0001EEAA, 0x0001EEAA}, {0x0001EEBC, 0x0001EEEF}, {0x0001EEF2, 0x0001EFFF}, {0x0001F02C, 0x0001F02F},
-{0x0001F094, 0x0001F09F}, {0x0001F0AF, 0x0001F0B0}, {0x0001F0C0, 0x0001F0C0}, {0x0001F0D0, 0x0001F0D0},
-{0x0001F0F6, 0x0001F0FF}, {0x0001F1AE, 0x0001F1E5}, {0x0001F203, 0x0001F20F}, {0x0001F23C, 0x0001F23F},
-{0x0001F249, 0x0001F24F}, {0x0001F252, 0x0001F25F}, {0x0001F266, 0x0001F2FF}, {0x0001F6D8, 0x0001F6DF},
-{0x0001F6ED, 0x0001F6EF}, {0x0001F6FD, 0x0001F6FF}, {0x0001F774, 0x0001F77F}, {0x0001F7D9, 0x0001F7DF},
-{0x0001F7EC, 0x0001F7FF}, {0x0001F80C, 0x0001F80F}, {0x0001F848, 0x0001F84F}, {0x0001F85A, 0x0001F85F},
-{0x0001F888, 0x0001F88F}, {0x0001F8AE, 0x0001F8AF}, {0x0001F8B2, 0x0001F8FF}, {0x0001F979, 0x0001F979},
-{0x0001F9CC, 0x0001F9CC}, {0x0001FA54, 0x0001FA5F}, {0x0001FA6E, 0x0001FA6F}, {0x0001FA75, 0x0001FA77},
-{0x0001FA7B, 0x0001FA7F}, {0x0001FA87, 0x0001FA8F}, {0x0001FAA9, 0x0001FAAF}, {0x0001FAB7, 0x0001FABF},
-{0x0001FAC3, 0x0001FACF}, {0x0001FAD7, 0x0001FAFF}, {0x0001FB93, 0x0001FB93}, {0x0001FBCB, 0x0001FBEF},
-{0x0001FBFA, 0x0001FFFF}, {0x0002A6DE, 0x0002A6FF}, {0x0002B735, 0x0002B73F}, {0x0002B81E, 0x0002B81F},
-{0x0002CEA2, 0x0002CEAF}, {0x0002EBE1, 0x0002F7FF}, {0x0002FA1E, 0x0002FFFF}, {0x0003134B, 0x000E00FF},
-{0x000E01F0, 0x0010FFFF},
+{0x0001DA8C, 0x0001DA9A}, {0x0001DAA0, 0x0001DAA0}, {0x0001DAB0, 0x0001DEFF}, {0x0001DF1F, 0x0001DF24},
+{0x0001DF2B, 0x0001DFFF}, {0x0001E007, 0x0001E007}, {0x0001E019, 0x0001E01A}, {0x0001E022, 0x0001E022},
+{0x0001E025, 0x0001E025}, {0x0001E02B, 0x0001E02F}, {0x0001E06E, 0x0001E08E}, {0x0001E090, 0x0001E0FF},
+{0x0001E12D, 0x0001E12F}, {0x0001E13E, 0x0001E13F}, {0x0001E14A, 0x0001E14D}, {0x0001E150, 0x0001E28F},
+{0x0001E2AF, 0x0001E2BF}, {0x0001E2FA, 0x0001E2FE}, {0x0001E300, 0x0001E4CF}, {0x0001E4FA, 0x0001E7DF},
+{0x0001E7E7, 0x0001E7E7}, {0x0001E7EC, 0x0001E7EC}, {0x0001E7EF, 0x0001E7EF}, {0x0001E7FF, 0x0001E7FF},
+{0x0001E8C5, 0x0001E8C6}, {0x0001E8D7, 0x0001E8FF}, {0x0001E94C, 0x0001E94F}, {0x0001E95A, 0x0001E95D},
+{0x0001E960, 0x0001EC70}, {0x0001ECB5, 0x0001ED00}, {0x0001ED3E, 0x0001EDFF}, {0x0001EE04, 0x0001EE04},
+{0x0001EE20, 0x0001EE20}, {0x0001EE23, 0x0001EE23}, {0x0001EE25, 0x0001EE26}, {0x0001EE28, 0x0001EE28},
+{0x0001EE33, 0x0001EE33}, {0x0001EE38, 0x0001EE38}, {0x0001EE3A, 0x0001EE3A}, {0x0001EE3C, 0x0001EE41},
+{0x0001EE43, 0x0001EE46}, {0x0001EE48, 0x0001EE48}, {0x0001EE4A, 0x0001EE4A}, {0x0001EE4C, 0x0001EE4C},
+{0x0001EE50, 0x0001EE50}, {0x0001EE53, 0x0001EE53}, {0x0001EE55, 0x0001EE56}, {0x0001EE58, 0x0001EE58},
+{0x0001EE5A, 0x0001EE5A}, {0x0001EE5C, 0x0001EE5C}, {0x0001EE5E, 0x0001EE5E}, {0x0001EE60, 0x0001EE60},
+{0x0001EE63, 0x0001EE63}, {0x0001EE65, 0x0001EE66}, {0x0001EE6B, 0x0001EE6B}, {0x0001EE73, 0x0001EE73},
+{0x0001EE78, 0x0001EE78}, {0x0001EE7D, 0x0001EE7D}, {0x0001EE7F, 0x0001EE7F}, {0x0001EE8A, 0x0001EE8A},
+{0x0001EE9C, 0x0001EEA0}, {0x0001EEA4, 0x0001EEA4}, {0x0001EEAA, 0x0001EEAA}, {0x0001EEBC, 0x0001EEEF},
+{0x0001EEF2, 0x0001EFFF}, {0x0001F02C, 0x0001F02F}, {0x0001F094, 0x0001F09F}, {0x0001F0AF, 0x0001F0B0},
+{0x0001F0C0, 0x0001F0C0}, {0x0001F0D0, 0x0001F0D0}, {0x0001F0F6, 0x0001F0FF}, {0x0001F1AE, 0x0001F1E5},
+{0x0001F203, 0x0001F20F}, {0x0001F23C, 0x0001F23F}, {0x0001F249, 0x0001F24F}, {0x0001F252, 0x0001F25F},
+{0x0001F266, 0x0001F2FF}, {0x0001F6D8, 0x0001F6DB}, {0x0001F6ED, 0x0001F6EF}, {0x0001F6FD, 0x0001F6FF},
+{0x0001F777, 0x0001F77A}, {0x0001F7DA, 0x0001F7DF}, {0x0001F7EC, 0x0001F7EF}, {0x0001F7F1, 0x0001F7FF},
+{0x0001F80C, 0x0001F80F}, {0x0001F848, 0x0001F84F}, {0x0001F85A, 0x0001F85F}, {0x0001F888, 0x0001F88F},
+{0x0001F8AE, 0x0001F8AF}, {0x0001F8B2, 0x0001F8FF}, {0x0001FA54, 0x0001FA5F}, {0x0001FA6E, 0x0001FA6F},
+{0x0001FA7D, 0x0001FA7F}, {0x0001FA89, 0x0001FA8F}, {0x0001FABE, 0x0001FABE}, {0x0001FAC6, 0x0001FACD},
+{0x0001FADC, 0x0001FADF}, {0x0001FAE9, 0x0001FAEF}, {0x0001FAF9, 0x0001FAFF}, {0x0001FB93, 0x0001FB93},
+{0x0001FBCB, 0x0001FBEF}, {0x0001FBFA, 0x0001FFFF}, {0x0002A6E0, 0x0002A6FF}, {0x0002B73A, 0x0002B73F},
+{0x0002B81E, 0x0002B81F}, {0x0002CEA2, 0x0002CEAF}, {0x0002EBE1, 0x0002EBEF}, {0x0002EE5E, 0x0002F7FF},
+{0x0002FA1E, 0x0002FFFF}, {0x0003134B, 0x0003134F}, {0x000323B0, 0x000E00FF}, {0x000E01F0, 0x0010FFFF},
+};
+
+const std::map<uint32_t, uint32_t> unicode_map_lowercase = {
+{0x00000041, 0x00000061}, {0x00000042, 0x00000062}, {0x00000043, 0x00000063}, {0x00000044, 0x00000064},
+{0x00000045, 0x00000065}, {0x00000046, 0x00000066}, {0x00000047, 0x00000067}, {0x00000048, 0x00000068},
+{0x00000049, 0x00000069}, {0x0000004A, 0x0000006A}, {0x0000004B, 0x0000006B}, {0x0000004C, 0x0000006C},
+{0x0000004D, 0x0000006D}, {0x0000004E, 0x0000006E}, {0x0000004F, 0x0000006F}, {0x00000050, 0x00000070},
+{0x00000051, 0x00000071}, {0x00000052, 0x00000072}, {0x00000053, 0x00000073}, {0x00000054, 0x00000074},
+{0x00000055, 0x00000075}, {0x00000056, 0x00000076}, {0x00000057, 0x00000077}, {0x00000058, 0x00000078},
+{0x00000059, 0x00000079}, {0x0000005A, 0x0000007A}, {0x000000C0, 0x000000E0}, {0x000000C1, 0x000000E1},
+{0x000000C2, 0x000000E2}, {0x000000C3, 0x000000E3}, {0x000000C4, 0x000000E4}, {0x000000C5, 0x000000E5},
+{0x000000C6, 0x000000E6}, {0x000000C7, 0x000000E7}, {0x000000C8, 0x000000E8}, {0x000000C9, 0x000000E9},
+{0x000000CA, 0x000000EA}, {0x000000CB, 0x000000EB}, {0x000000CC, 0x000000EC}, {0x000000CD, 0x000000ED},
+{0x000000CE, 0x000000EE}, {0x000000CF, 0x000000EF}, {0x000000D0, 0x000000F0}, {0x000000D1, 0x000000F1},
+{0x000000D2, 0x000000F2}, {0x000000D3, 0x000000F3}, {0x000000D4, 0x000000F4}, {0x000000D5, 0x000000F5},
+{0x000000D6, 0x000000F6}, {0x000000D8, 0x000000F8}, {0x000000D9, 0x000000F9}, {0x000000DA, 0x000000FA},
+{0x000000DB, 0x000000FB}, {0x000000DC, 0x000000FC}, {0x000000DD, 0x000000FD}, {0x000000DE, 0x000000FE},
+{0x00000100, 0x00000101}, {0x00000102, 0x00000103}, {0x00000104, 0x00000105}, {0x00000106, 0x00000107},
+{0x00000108, 0x00000109}, {0x0000010A, 0x0000010B}, {0x0000010C, 0x0000010D}, {0x0000010E, 0x0000010F},
+{0x00000110, 0x00000111}, {0x00000112, 0x00000113}, {0x00000114, 0x00000115}, {0x00000116, 0x00000117},
+{0x00000118, 0x00000119}, {0x0000011A, 0x0000011B}, {0x0000011C, 0x0000011D}, {0x0000011E, 0x0000011F},
+{0x00000120, 0x00000121}, {0x00000122, 0x00000123}, {0x00000124, 0x00000125}, {0x00000126, 0x00000127},
+{0x00000128, 0x00000129}, {0x0000012A, 0x0000012B}, {0x0000012C, 0x0000012D}, {0x0000012E, 0x0000012F},
+{0x00000130, 0x00000069}, {0x00000132, 0x00000133}, {0x00000134, 0x00000135}, {0x00000136, 0x00000137},
+{0x00000139, 0x0000013A}, {0x0000013B, 0x0000013C}, {0x0000013D, 0x0000013E}, {0x0000013F, 0x00000140},
+{0x00000141, 0x00000142}, {0x00000143, 0x00000144}, {0x00000145, 0x00000146}, {0x00000147, 0x00000148},
+{0x0000014A, 0x0000014B}, {0x0000014C, 0x0000014D}, {0x0000014E, 0x0000014F}, {0x00000150, 0x00000151},
+{0x00000152, 0x00000153}, {0x00000154, 0x00000155}, {0x00000156, 0x00000157}, {0x00000158, 0x00000159},
+{0x0000015A, 0x0000015B}, {0x0000015C, 0x0000015D}, {0x0000015E, 0x0000015F}, {0x00000160, 0x00000161},
+{0x00000162, 0x00000163}, {0x00000164, 0x00000165}, {0x00000166, 0x00000167}, {0x00000168, 0x00000169},
+{0x0000016A, 0x0000016B}, {0x0000016C, 0x0000016D}, {0x0000016E, 0x0000016F}, {0x00000170, 0x00000171},
+{0x00000172, 0x00000173}, {0x00000174, 0x00000175}, {0x00000176, 0x00000177}, {0x00000178, 0x000000FF},
+{0x00000179, 0x0000017A}, {0x0000017B, 0x0000017C}, {0x0000017D, 0x0000017E}, {0x00000181, 0x00000253},
+{0x00000182, 0x00000183}, {0x00000184, 0x00000185}, {0x00000186, 0x00000254}, {0x00000187, 0x00000188},
+{0x00000189, 0x00000256}, {0x0000018A, 0x00000257}, {0x0000018B, 0x0000018C}, {0x0000018E, 0x000001DD},
+{0x0000018F, 0x00000259}, {0x00000190, 0x0000025B}, {0x00000191, 0x00000192}, {0x00000193, 0x00000260},
+{0x00000194, 0x00000263}, {0x00000196, 0x00000269}, {0x00000197, 0x00000268}, {0x00000198, 0x00000199},
+{0x0000019C, 0x0000026F}, {0x0000019D, 0x00000272}, {0x0000019F, 0x00000275}, {0x000001A0, 0x000001A1},
+{0x000001A2, 0x000001A3}, {0x000001A4, 0x000001A5}, {0x000001A6, 0x00000280}, {0x000001A7, 0x000001A8},
+{0x000001A9, 0x00000283}, {0x000001AC, 0x000001AD}, {0x000001AE, 0x00000288}, {0x000001AF, 0x000001B0},
+{0x000001B1, 0x0000028A}, {0x000001B2, 0x0000028B}, {0x000001B3, 0x000001B4}, {0x000001B5, 0x000001B6},
+{0x000001B7, 0x00000292}, {0x000001B8, 0x000001B9}, {0x000001BC, 0x000001BD}, {0x000001C4, 0x000001C6},
+{0x000001C5, 0x000001C6}, {0x000001C7, 0x000001C9}, {0x000001C8, 0x000001C9}, {0x000001CA, 0x000001CC},
+{0x000001CB, 0x000001CC}, {0x000001CD, 0x000001CE}, {0x000001CF, 0x000001D0}, {0x000001D1, 0x000001D2},
+{0x000001D3, 0x000001D4}, {0x000001D5, 0x000001D6}, {0x000001D7, 0x000001D8}, {0x000001D9, 0x000001DA},
+{0x000001DB, 0x000001DC}, {0x000001DE, 0x000001DF}, {0x000001E0, 0x000001E1}, {0x000001E2, 0x000001E3},
+{0x000001E4, 0x000001E5}, {0x000001E6, 0x000001E7}, {0x000001E8, 0x000001E9}, {0x000001EA, 0x000001EB},
+{0x000001EC, 0x000001ED}, {0x000001EE, 0x000001EF}, {0x000001F1, 0x000001F3}, {0x000001F2, 0x000001F3},
+{0x000001F4, 0x000001F5}, {0x000001F6, 0x00000195}, {0x000001F7, 0x000001BF}, {0x000001F8, 0x000001F9},
+{0x000001FA, 0x000001FB}, {0x000001FC, 0x000001FD}, {0x000001FE, 0x000001FF}, {0x00000200, 0x00000201},
+{0x00000202, 0x00000203}, {0x00000204, 0x00000205}, {0x00000206, 0x00000207}, {0x00000208, 0x00000209},
+{0x0000020A, 0x0000020B}, {0x0000020C, 0x0000020D}, {0x0000020E, 0x0000020F}, {0x00000210, 0x00000211},
+{0x00000212, 0x00000213}, {0x00000214, 0x00000215}, {0x00000216, 0x00000217}, {0x00000218, 0x00000219},
+{0x0000021A, 0x0000021B}, {0x0000021C, 0x0000021D}, {0x0000021E, 0x0000021F}, {0x00000220, 0x0000019E},
+{0x00000222, 0x00000223}, {0x00000224, 0x00000225}, {0x00000226, 0x00000227}, {0x00000228, 0x00000229},
+{0x0000022A, 0x0000022B}, {0x0000022C, 0x0000022D}, {0x0000022E, 0x0000022F}, {0x00000230, 0x00000231},
+{0x00000232, 0x00000233}, {0x0000023A, 0x00002C65}, {0x0000023B, 0x0000023C}, {0x0000023D, 0x0000019A},
+{0x0000023E, 0x00002C66}, {0x00000241, 0x00000242}, {0x00000243, 0x00000180}, {0x00000244, 0x00000289},
+{0x00000245, 0x0000028C}, {0x00000246, 0x00000247}, {0x00000248, 0x00000249}, {0x0000024A, 0x0000024B},
+{0x0000024C, 0x0000024D}, {0x0000024E, 0x0000024F}, {0x00000370, 0x00000371}, {0x00000372, 0x00000373},
+{0x00000376, 0x00000377}, {0x0000037F, 0x000003F3}, {0x00000386, 0x000003AC}, {0x00000388, 0x000003AD},
+{0x00000389, 0x000003AE}, {0x0000038A, 0x000003AF}, {0x0000038C, 0x000003CC}, {0x0000038E, 0x000003CD},
+{0x0000038F, 0x000003CE}, {0x00000391, 0x000003B1}, {0x00000392, 0x000003B2}, {0x00000393, 0x000003B3},
+{0x00000394, 0x000003B4}, {0x00000395, 0x000003B5}, {0x00000396, 0x000003B6}, {0x00000397, 0x000003B7},
+{0x00000398, 0x000003B8}, {0x00000399, 0x000003B9}, {0x0000039A, 0x000003BA}, {0x0000039B, 0x000003BB},
+{0x0000039C, 0x000003BC}, {0x0000039D, 0x000003BD}, {0x0000039E, 0x000003BE}, {0x0000039F, 0x000003BF},
+{0x000003A0, 0x000003C0}, {0x000003A1, 0x000003C1}, {0x000003A3, 0x000003C3}, {0x000003A4, 0x000003C4},
+{0x000003A5, 0x000003C5}, {0x000003A6, 0x000003C6}, {0x000003A7, 0x000003C7}, {0x000003A8, 0x000003C8},
+{0x000003A9, 0x000003C9}, {0x000003AA, 0x000003CA}, {0x000003AB, 0x000003CB}, {0x000003CF, 0x000003D7},
+{0x000003D8, 0x000003D9}, {0x000003DA, 0x000003DB}, {0x000003DC, 0x000003DD}, {0x000003DE, 0x000003DF},
+{0x000003E0, 0x000003E1}, {0x000003E2, 0x000003E3}, {0x000003E4, 0x000003E5}, {0x000003E6, 0x000003E7},
+{0x000003E8, 0x000003E9}, {0x000003EA, 0x000003EB}, {0x000003EC, 0x000003ED}, {0x000003EE, 0x000003EF},
+{0x000003F4, 0x000003B8}, {0x000003F7, 0x000003F8}, {0x000003F9, 0x000003F2}, {0x000003FA, 0x000003FB},
+{0x000003FD, 0x0000037B}, {0x000003FE, 0x0000037C}, {0x000003FF, 0x0000037D}, {0x00000400, 0x00000450},
+{0x00000401, 0x00000451}, {0x00000402, 0x00000452}, {0x00000403, 0x00000453}, {0x00000404, 0x00000454},
+{0x00000405, 0x00000455}, {0x00000406, 0x00000456}, {0x00000407, 0x00000457}, {0x00000408, 0x00000458},
+{0x00000409, 0x00000459}, {0x0000040A, 0x0000045A}, {0x0000040B, 0x0000045B}, {0x0000040C, 0x0000045C},
+{0x0000040D, 0x0000045D}, {0x0000040E, 0x0000045E}, {0x0000040F, 0x0000045F}, {0x00000410, 0x00000430},
+{0x00000411, 0x00000431}, {0x00000412, 0x00000432}, {0x00000413, 0x00000433}, {0x00000414, 0x00000434},
+{0x00000415, 0x00000435}, {0x00000416, 0x00000436}, {0x00000417, 0x00000437}, {0x00000418, 0x00000438},
+{0x00000419, 0x00000439}, {0x0000041A, 0x0000043A}, {0x0000041B, 0x0000043B}, {0x0000041C, 0x0000043C},
+{0x0000041D, 0x0000043D}, {0x0000041E, 0x0000043E}, {0x0000041F, 0x0000043F}, {0x00000420, 0x00000440},
+{0x00000421, 0x00000441}, {0x00000422, 0x00000442}, {0x00000423, 0x00000443}, {0x00000424, 0x00000444},
+{0x00000425, 0x00000445}, {0x00000426, 0x00000446}, {0x00000427, 0x00000447}, {0x00000428, 0x00000448},
+{0x00000429, 0x00000449}, {0x0000042A, 0x0000044A}, {0x0000042B, 0x0000044B}, {0x0000042C, 0x0000044C},
+{0x0000042D, 0x0000044D}, {0x0000042E, 0x0000044E}, {0x0000042F, 0x0000044F}, {0x00000460, 0x00000461},
+{0x00000462, 0x00000463}, {0x00000464, 0x00000465}, {0x00000466, 0x00000467}, {0x00000468, 0x00000469},
+{0x0000046A, 0x0000046B}, {0x0000046C, 0x0000046D}, {0x0000046E, 0x0000046F}, {0x00000470, 0x00000471},
+{0x00000472, 0x00000473}, {0x00000474, 0x00000475}, {0x00000476, 0x00000477}, {0x00000478, 0x00000479},
+{0x0000047A, 0x0000047B}, {0x0000047C, 0x0000047D}, {0x0000047E, 0x0000047F}, {0x00000480, 0x00000481},
+{0x0000048A, 0x0000048B}, {0x0000048C, 0x0000048D}, {0x0000048E, 0x0000048F}, {0x00000490, 0x00000491},
+{0x00000492, 0x00000493}, {0x00000494, 0x00000495}, {0x00000496, 0x00000497}, {0x00000498, 0x00000499},
+{0x0000049A, 0x0000049B}, {0x0000049C, 0x0000049D}, {0x0000049E, 0x0000049F}, {0x000004A0, 0x000004A1},
+{0x000004A2, 0x000004A3}, {0x000004A4, 0x000004A5}, {0x000004A6, 0x000004A7}, {0x000004A8, 0x000004A9},
+{0x000004AA, 0x000004AB}, {0x000004AC, 0x000004AD}, {0x000004AE, 0x000004AF}, {0x000004B0, 0x000004B1},
+{0x000004B2, 0x000004B3}, {0x000004B4, 0x000004B5}, {0x000004B6, 0x000004B7}, {0x000004B8, 0x000004B9},
+{0x000004BA, 0x000004BB}, {0x000004BC, 0x000004BD}, {0x000004BE, 0x000004BF}, {0x000004C0, 0x000004CF},
+{0x000004C1, 0x000004C2}, {0x000004C3, 0x000004C4}, {0x000004C5, 0x000004C6}, {0x000004C7, 0x000004C8},
+{0x000004C9, 0x000004CA}, {0x000004CB, 0x000004CC}, {0x000004CD, 0x000004CE}, {0x000004D0, 0x000004D1},
+{0x000004D2, 0x000004D3}, {0x000004D4, 0x000004D5}, {0x000004D6, 0x000004D7}, {0x000004D8, 0x000004D9},
+{0x000004DA, 0x000004DB}, {0x000004DC, 0x000004DD}, {0x000004DE, 0x000004DF}, {0x000004E0, 0x000004E1},
+{0x000004E2, 0x000004E3}, {0x000004E4, 0x000004E5}, {0x000004E6, 0x000004E7}, {0x000004E8, 0x000004E9},
+{0x000004EA, 0x000004EB}, {0x000004EC, 0x000004ED}, {0x000004EE, 0x000004EF}, {0x000004F0, 0x000004F1},
+{0x000004F2, 0x000004F3}, {0x000004F4, 0x000004F5}, {0x000004F6, 0x000004F7}, {0x000004F8, 0x000004F9},
+{0x000004FA, 0x000004FB}, {0x000004FC, 0x000004FD}, {0x000004FE, 0x000004FF}, {0x00000500, 0x00000501},
+{0x00000502, 0x00000503}, {0x00000504, 0x00000505}, {0x00000506, 0x00000507}, {0x00000508, 0x00000509},
+{0x0000050A, 0x0000050B}, {0x0000050C, 0x0000050D}, {0x0000050E, 0x0000050F}, {0x00000510, 0x00000511},
+{0x00000512, 0x00000513}, {0x00000514, 0x00000515}, {0x00000516, 0x00000517}, {0x00000518, 0x00000519},
+{0x0000051A, 0x0000051B}, {0x0000051C, 0x0000051D}, {0x0000051E, 0x0000051F}, {0x00000520, 0x00000521},
+{0x00000522, 0x00000523}, {0x00000524, 0x00000525}, {0x00000526, 0x00000527}, {0x00000528, 0x00000529},
+{0x0000052A, 0x0000052B}, {0x0000052C, 0x0000052D}, {0x0000052E, 0x0000052F}, {0x00000531, 0x00000561},
+{0x00000532, 0x00000562}, {0x00000533, 0x00000563}, {0x00000534, 0x00000564}, {0x00000535, 0x00000565},
+{0x00000536, 0x00000566}, {0x00000537, 0x00000567}, {0x00000538, 0x00000568}, {0x00000539, 0x00000569},
+{0x0000053A, 0x0000056A}, {0x0000053B, 0x0000056B}, {0x0000053C, 0x0000056C}, {0x0000053D, 0x0000056D},
+{0x0000053E, 0x0000056E}, {0x0000053F, 0x0000056F}, {0x00000540, 0x00000570}, {0x00000541, 0x00000571},
+{0x00000542, 0x00000572}, {0x00000543, 0x00000573}, {0x00000544, 0x00000574}, {0x00000545, 0x00000575},
+{0x00000546, 0x00000576}, {0x00000547, 0x00000577}, {0x00000548, 0x00000578}, {0x00000549, 0x00000579},
+{0x0000054A, 0x0000057A}, {0x0000054B, 0x0000057B}, {0x0000054C, 0x0000057C}, {0x0000054D, 0x0000057D},
+{0x0000054E, 0x0000057E}, {0x0000054F, 0x0000057F}, {0x00000550, 0x00000580}, {0x00000551, 0x00000581},
+{0x00000552, 0x00000582}, {0x00000553, 0x00000583}, {0x00000554, 0x00000584}, {0x00000555, 0x00000585},
+{0x00000556, 0x00000586}, {0x000010A0, 0x00002D00}, {0x000010A1, 0x00002D01}, {0x000010A2, 0x00002D02},
+{0x000010A3, 0x00002D03}, {0x000010A4, 0x00002D04}, {0x000010A5, 0x00002D05}, {0x000010A6, 0x00002D06},
+{0x000010A7, 0x00002D07}, {0x000010A8, 0x00002D08}, {0x000010A9, 0x00002D09}, {0x000010AA, 0x00002D0A},
+{0x000010AB, 0x00002D0B}, {0x000010AC, 0x00002D0C}, {0x000010AD, 0x00002D0D}, {0x000010AE, 0x00002D0E},
+{0x000010AF, 0x00002D0F}, {0x000010B0, 0x00002D10}, {0x000010B1, 0x00002D11}, {0x000010B2, 0x00002D12},
+{0x000010B3, 0x00002D13}, {0x000010B4, 0x00002D14}, {0x000010B5, 0x00002D15}, {0x000010B6, 0x00002D16},
+{0x000010B7, 0x00002D17}, {0x000010B8, 0x00002D18}, {0x000010B9, 0x00002D19}, {0x000010BA, 0x00002D1A},
+{0x000010BB, 0x00002D1B}, {0x000010BC, 0x00002D1C}, {0x000010BD, 0x00002D1D}, {0x000010BE, 0x00002D1E},
+{0x000010BF, 0x00002D1F}, {0x000010C0, 0x00002D20}, {0x000010C1, 0x00002D21}, {0x000010C2, 0x00002D22},
+{0x000010C3, 0x00002D23}, {0x000010C4, 0x00002D24}, {0x000010C5, 0x00002D25}, {0x000010C7, 0x00002D27},
+{0x000010CD, 0x00002D2D}, {0x000013A0, 0x0000AB70}, {0x000013A1, 0x0000AB71}, {0x000013A2, 0x0000AB72},
+{0x000013A3, 0x0000AB73}, {0x000013A4, 0x0000AB74}, {0x000013A5, 0x0000AB75}, {0x000013A6, 0x0000AB76},
+{0x000013A7, 0x0000AB77}, {0x000013A8, 0x0000AB78}, {0x000013A9, 0x0000AB79}, {0x000013AA, 0x0000AB7A},
+{0x000013AB, 0x0000AB7B}, {0x000013AC, 0x0000AB7C}, {0x000013AD, 0x0000AB7D}, {0x000013AE, 0x0000AB7E},
+{0x000013AF, 0x0000AB7F}, {0x000013B0, 0x0000AB80}, {0x000013B1, 0x0000AB81}, {0x000013B2, 0x0000AB82},
+{0x000013B3, 0x0000AB83}, {0x000013B4, 0x0000AB84}, {0x000013B5, 0x0000AB85}, {0x000013B6, 0x0000AB86},
+{0x000013B7, 0x0000AB87}, {0x000013B8, 0x0000AB88}, {0x000013B9, 0x0000AB89}, {0x000013BA, 0x0000AB8A},
+{0x000013BB, 0x0000AB8B}, {0x000013BC, 0x0000AB8C}, {0x000013BD, 0x0000AB8D}, {0x000013BE, 0x0000AB8E},
+{0x000013BF, 0x0000AB8F}, {0x000013C0, 0x0000AB90}, {0x000013C1, 0x0000AB91}, {0x000013C2, 0x0000AB92},
+{0x000013C3, 0x0000AB93}, {0x000013C4, 0x0000AB94}, {0x000013C5, 0x0000AB95}, {0x000013C6, 0x0000AB96},
+{0x000013C7, 0x0000AB97}, {0x000013C8, 0x0000AB98}, {0x000013C9, 0x0000AB99}, {0x000013CA, 0x0000AB9A},
+{0x000013CB, 0x0000AB9B}, {0x000013CC, 0x0000AB9C}, {0x000013CD, 0x0000AB9D}, {0x000013CE, 0x0000AB9E},
+{0x000013CF, 0x0000AB9F}, {0x000013D0, 0x0000ABA0}, {0x000013D1, 0x0000ABA1}, {0x000013D2, 0x0000ABA2},
+{0x000013D3, 0x0000ABA3}, {0x000013D4, 0x0000ABA4}, {0x000013D5, 0x0000ABA5}, {0x000013D6, 0x0000ABA6},
+{0x000013D7, 0x0000ABA7}, {0x000013D8, 0x0000ABA8}, {0x000013D9, 0x0000ABA9}, {0x000013DA, 0x0000ABAA},
+{0x000013DB, 0x0000ABAB}, {0x000013DC, 0x0000ABAC}, {0x000013DD, 0x0000ABAD}, {0x000013DE, 0x0000ABAE},
+{0x000013DF, 0x0000ABAF}, {0x000013E0, 0x0000ABB0}, {0x000013E1, 0x0000ABB1}, {0x000013E2, 0x0000ABB2},
+{0x000013E3, 0x0000ABB3}, {0x000013E4, 0x0000ABB4}, {0x000013E5, 0x0000ABB5}, {0x000013E6, 0x0000ABB6},
+{0x000013E7, 0x0000ABB7}, {0x000013E8, 0x0000ABB8}, {0x000013E9, 0x0000ABB9}, {0x000013EA, 0x0000ABBA},
+{0x000013EB, 0x0000ABBB}, {0x000013EC, 0x0000ABBC}, {0x000013ED, 0x0000ABBD}, {0x000013EE, 0x0000ABBE},
+{0x000013EF, 0x0000ABBF}, {0x000013F0, 0x000013F8}, {0x000013F1, 0x000013F9}, {0x000013F2, 0x000013FA},
+{0x000013F3, 0x000013FB}, {0x000013F4, 0x000013FC}, {0x000013F5, 0x000013FD}, {0x00001C90, 0x000010D0},
+{0x00001C91, 0x000010D1}, {0x00001C92, 0x000010D2}, {0x00001C93, 0x000010D3}, {0x00001C94, 0x000010D4},
+{0x00001C95, 0x000010D5}, {0x00001C96, 0x000010D6}, {0x00001C97, 0x000010D7}, {0x00001C98, 0x000010D8},
+{0x00001C99, 0x000010D9}, {0x00001C9A, 0x000010DA}, {0x00001C9B, 0x000010DB}, {0x00001C9C, 0x000010DC},
+{0x00001C9D, 0x000010DD}, {0x00001C9E, 0x000010DE}, {0x00001C9F, 0x000010DF}, {0x00001CA0, 0x000010E0},
+{0x00001CA1, 0x000010E1}, {0x00001CA2, 0x000010E2}, {0x00001CA3, 0x000010E3}, {0x00001CA4, 0x000010E4},
+{0x00001CA5, 0x000010E5}, {0x00001CA6, 0x000010E6}, {0x00001CA7, 0x000010E7}, {0x00001CA8, 0x000010E8},
+{0x00001CA9, 0x000010E9}, {0x00001CAA, 0x000010EA}, {0x00001CAB, 0x000010EB}, {0x00001CAC, 0x000010EC},
+{0x00001CAD, 0x000010ED}, {0x00001CAE, 0x000010EE}, {0x00001CAF, 0x000010EF}, {0x00001CB0, 0x000010F0},
+{0x00001CB1, 0x000010F1}, {0x00001CB2, 0x000010F2}, {0x00001CB3, 0x000010F3}, {0x00001CB4, 0x000010F4},
+{0x00001CB5, 0x000010F5}, {0x00001CB6, 0x000010F6}, {0x00001CB7, 0x000010F7}, {0x00001CB8, 0x000010F8},
+{0x00001CB9, 0x000010F9}, {0x00001CBA, 0x000010FA}, {0x00001CBD, 0x000010FD}, {0x00001CBE, 0x000010FE},
+{0x00001CBF, 0x000010FF}, {0x00001E00, 0x00001E01}, {0x00001E02, 0x00001E03}, {0x00001E04, 0x00001E05},
+{0x00001E06, 0x00001E07}, {0x00001E08, 0x00001E09}, {0x00001E0A, 0x00001E0B}, {0x00001E0C, 0x00001E0D},
+{0x00001E0E, 0x00001E0F}, {0x00001E10, 0x00001E11}, {0x00001E12, 0x00001E13}, {0x00001E14, 0x00001E15},
+{0x00001E16, 0x00001E17}, {0x00001E18, 0x00001E19}, {0x00001E1A, 0x00001E1B}, {0x00001E1C, 0x00001E1D},
+{0x00001E1E, 0x00001E1F}, {0x00001E20, 0x00001E21}, {0x00001E22, 0x00001E23}, {0x00001E24, 0x00001E25},
+{0x00001E26, 0x00001E27}, {0x00001E28, 0x00001E29}, {0x00001E2A, 0x00001E2B}, {0x00001E2C, 0x00001E2D},
+{0x00001E2E, 0x00001E2F}, {0x00001E30, 0x00001E31}, {0x00001E32, 0x00001E33}, {0x00001E34, 0x00001E35},
+{0x00001E36, 0x00001E37}, {0x00001E38, 0x00001E39}, {0x00001E3A, 0x00001E3B}, {0x00001E3C, 0x00001E3D},
+{0x00001E3E, 0x00001E3F}, {0x00001E40, 0x00001E41}, {0x00001E42, 0x00001E43}, {0x00001E44, 0x00001E45},
+{0x00001E46, 0x00001E47}, {0x00001E48, 0x00001E49}, {0x00001E4A, 0x00001E4B}, {0x00001E4C, 0x00001E4D},
+{0x00001E4E, 0x00001E4F}, {0x00001E50, 0x00001E51}, {0x00001E52, 0x00001E53}, {0x00001E54, 0x00001E55},
+{0x00001E56, 0x00001E57}, {0x00001E58, 0x00001E59}, {0x00001E5A, 0x00001E5B}, {0x00001E5C, 0x00001E5D},
+{0x00001E5E, 0x00001E5F}, {0x00001E60, 0x00001E61}, {0x00001E62, 0x00001E63}, {0x00001E64, 0x00001E65},
+{0x00001E66, 0x00001E67}, {0x00001E68, 0x00001E69}, {0x00001E6A, 0x00001E6B}, {0x00001E6C, 0x00001E6D},
+{0x00001E6E, 0x00001E6F}, {0x00001E70, 0x00001E71}, {0x00001E72, 0x00001E73}, {0x00001E74, 0x00001E75},
+{0x00001E76, 0x00001E77}, {0x00001E78, 0x00001E79}, {0x00001E7A, 0x00001E7B}, {0x00001E7C, 0x00001E7D},
+{0x00001E7E, 0x00001E7F}, {0x00001E80, 0x00001E81}, {0x00001E82, 0x00001E83}, {0x00001E84, 0x00001E85},
+{0x00001E86, 0x00001E87}, {0x00001E88, 0x00001E89}, {0x00001E8A, 0x00001E8B}, {0x00001E8C, 0x00001E8D},
+{0x00001E8E, 0x00001E8F}, {0x00001E90, 0x00001E91}, {0x00001E92, 0x00001E93}, {0x00001E94, 0x00001E95},
+{0x00001E9E, 0x000000DF}, {0x00001EA0, 0x00001EA1}, {0x00001EA2, 0x00001EA3}, {0x00001EA4, 0x00001EA5},
+{0x00001EA6, 0x00001EA7}, {0x00001EA8, 0x00001EA9}, {0x00001EAA, 0x00001EAB}, {0x00001EAC, 0x00001EAD},
+{0x00001EAE, 0x00001EAF}, {0x00001EB0, 0x00001EB1}, {0x00001EB2, 0x00001EB3}, {0x00001EB4, 0x00001EB5},
+{0x00001EB6, 0x00001EB7}, {0x00001EB8, 0x00001EB9}, {0x00001EBA, 0x00001EBB}, {0x00001EBC, 0x00001EBD},
+{0x00001EBE, 0x00001EBF}, {0x00001EC0, 0x00001EC1}, {0x00001EC2, 0x00001EC3}, {0x00001EC4, 0x00001EC5},
+{0x00001EC6, 0x00001EC7}, {0x00001EC8, 0x00001EC9}, {0x00001ECA, 0x00001ECB}, {0x00001ECC, 0x00001ECD},
+{0x00001ECE, 0x00001ECF}, {0x00001ED0, 0x00001ED1}, {0x00001ED2, 0x00001ED3}, {0x00001ED4, 0x00001ED5},
+{0x00001ED6, 0x00001ED7}, {0x00001ED8, 0x00001ED9}, {0x00001EDA, 0x00001EDB}, {0x00001EDC, 0x00001EDD},
+{0x00001EDE, 0x00001EDF}, {0x00001EE0, 0x00001EE1}, {0x00001EE2, 0x00001EE3}, {0x00001EE4, 0x00001EE5},
+{0x00001EE6, 0x00001EE7}, {0x00001EE8, 0x00001EE9}, {0x00001EEA, 0x00001EEB}, {0x00001EEC, 0x00001EED},
+{0x00001EEE, 0x00001EEF}, {0x00001EF0, 0x00001EF1}, {0x00001EF2, 0x00001EF3}, {0x00001EF4, 0x00001EF5},
+{0x00001EF6, 0x00001EF7}, {0x00001EF8, 0x00001EF9}, {0x00001EFA, 0x00001EFB}, {0x00001EFC, 0x00001EFD},
+{0x00001EFE, 0x00001EFF}, {0x00001F08, 0x00001F00}, {0x00001F09, 0x00001F01}, {0x00001F0A, 0x00001F02},
+{0x00001F0B, 0x00001F03}, {0x00001F0C, 0x00001F04}, {0x00001F0D, 0x00001F05}, {0x00001F0E, 0x00001F06},
+{0x00001F0F, 0x00001F07}, {0x00001F18, 0x00001F10}, {0x00001F19, 0x00001F11}, {0x00001F1A, 0x00001F12},
+{0x00001F1B, 0x00001F13}, {0x00001F1C, 0x00001F14}, {0x00001F1D, 0x00001F15}, {0x00001F28, 0x00001F20},
+{0x00001F29, 0x00001F21}, {0x00001F2A, 0x00001F22}, {0x00001F2B, 0x00001F23}, {0x00001F2C, 0x00001F24},
+{0x00001F2D, 0x00001F25}, {0x00001F2E, 0x00001F26}, {0x00001F2F, 0x00001F27}, {0x00001F38, 0x00001F30},
+{0x00001F39, 0x00001F31}, {0x00001F3A, 0x00001F32}, {0x00001F3B, 0x00001F33}, {0x00001F3C, 0x00001F34},
+{0x00001F3D, 0x00001F35}, {0x00001F3E, 0x00001F36}, {0x00001F3F, 0x00001F37}, {0x00001F48, 0x00001F40},
+{0x00001F49, 0x00001F41}, {0x00001F4A, 0x00001F42}, {0x00001F4B, 0x00001F43}, {0x00001F4C, 0x00001F44},
+{0x00001F4D, 0x00001F45}, {0x00001F59, 0x00001F51}, {0x00001F5B, 0x00001F53}, {0x00001F5D, 0x00001F55},
+{0x00001F5F, 0x00001F57}, {0x00001F68, 0x00001F60}, {0x00001F69, 0x00001F61}, {0x00001F6A, 0x00001F62},
+{0x00001F6B, 0x00001F63}, {0x00001F6C, 0x00001F64}, {0x00001F6D, 0x00001F65}, {0x00001F6E, 0x00001F66},
+{0x00001F6F, 0x00001F67}, {0x00001F88, 0x00001F80}, {0x00001F89, 0x00001F81}, {0x00001F8A, 0x00001F82},
+{0x00001F8B, 0x00001F83}, {0x00001F8C, 0x00001F84}, {0x00001F8D, 0x00001F85}, {0x00001F8E, 0x00001F86},
+{0x00001F8F, 0x00001F87}, {0x00001F98, 0x00001F90}, {0x00001F99, 0x00001F91}, {0x00001F9A, 0x00001F92},
+{0x00001F9B, 0x00001F93}, {0x00001F9C, 0x00001F94}, {0x00001F9D, 0x00001F95}, {0x00001F9E, 0x00001F96},
+{0x00001F9F, 0x00001F97}, {0x00001FA8, 0x00001FA0}, {0x00001FA9, 0x00001FA1}, {0x00001FAA, 0x00001FA2},
+{0x00001FAB, 0x00001FA3}, {0x00001FAC, 0x00001FA4}, {0x00001FAD, 0x00001FA5}, {0x00001FAE, 0x00001FA6},
+{0x00001FAF, 0x00001FA7}, {0x00001FB8, 0x00001FB0}, {0x00001FB9, 0x00001FB1}, {0x00001FBA, 0x00001F70},
+{0x00001FBB, 0x00001F71}, {0x00001FBC, 0x00001FB3}, {0x00001FC8, 0x00001F72}, {0x00001FC9, 0x00001F73},
+{0x00001FCA, 0x00001F74}, {0x00001FCB, 0x00001F75}, {0x00001FCC, 0x00001FC3}, {0x00001FD8, 0x00001FD0},
+{0x00001FD9, 0x00001FD1}, {0x00001FDA, 0x00001F76}, {0x00001FDB, 0x00001F77}, {0x00001FE8, 0x00001FE0},
+{0x00001FE9, 0x00001FE1}, {0x00001FEA, 0x00001F7A}, {0x00001FEB, 0x00001F7B}, {0x00001FEC, 0x00001FE5},
+{0x00001FF8, 0x00001F78}, {0x00001FF9, 0x00001F79}, {0x00001FFA, 0x00001F7C}, {0x00001FFB, 0x00001F7D},
+{0x00001FFC, 0x00001FF3}, {0x00002126, 0x000003C9}, {0x0000212A, 0x0000006B}, {0x0000212B, 0x000000E5},
+{0x00002132, 0x0000214E}, {0x00002160, 0x00002170}, {0x00002161, 0x00002171}, {0x00002162, 0x00002172},
+{0x00002163, 0x00002173}, {0x00002164, 0x00002174}, {0x00002165, 0x00002175}, {0x00002166, 0x00002176},
+{0x00002167, 0x00002177}, {0x00002168, 0x00002178}, {0x00002169, 0x00002179}, {0x0000216A, 0x0000217A},
+{0x0000216B, 0x0000217B}, {0x0000216C, 0x0000217C}, {0x0000216D, 0x0000217D}, {0x0000216E, 0x0000217E},
+{0x0000216F, 0x0000217F}, {0x00002183, 0x00002184}, {0x000024B6, 0x000024D0}, {0x000024B7, 0x000024D1},
+{0x000024B8, 0x000024D2}, {0x000024B9, 0x000024D3}, {0x000024BA, 0x000024D4}, {0x000024BB, 0x000024D5},
+{0x000024BC, 0x000024D6}, {0x000024BD, 0x000024D7}, {0x000024BE, 0x000024D8}, {0x000024BF, 0x000024D9},
+{0x000024C0, 0x000024DA}, {0x000024C1, 0x000024DB}, {0x000024C2, 0x000024DC}, {0x000024C3, 0x000024DD},
+{0x000024C4, 0x000024DE}, {0x000024C5, 0x000024DF}, {0x000024C6, 0x000024E0}, {0x000024C7, 0x000024E1},
+{0x000024C8, 0x000024E2}, {0x000024C9, 0x000024E3}, {0x000024CA, 0x000024E4}, {0x000024CB, 0x000024E5},
+{0x000024CC, 0x000024E6}, {0x000024CD, 0x000024E7}, {0x000024CE, 0x000024E8}, {0x000024CF, 0x000024E9},
+{0x00002C00, 0x00002C30}, {0x00002C01, 0x00002C31}, {0x00002C02, 0x00002C32}, {0x00002C03, 0x00002C33},
+{0x00002C04, 0x00002C34}, {0x00002C05, 0x00002C35}, {0x00002C06, 0x00002C36}, {0x00002C07, 0x00002C37},
+{0x00002C08, 0x00002C38}, {0x00002C09, 0x00002C39}, {0x00002C0A, 0x00002C3A}, {0x00002C0B, 0x00002C3B},
+{0x00002C0C, 0x00002C3C}, {0x00002C0D, 0x00002C3D}, {0x00002C0E, 0x00002C3E}, {0x00002C0F, 0x00002C3F},
+{0x00002C10, 0x00002C40}, {0x00002C11, 0x00002C41}, {0x00002C12, 0x00002C42}, {0x00002C13, 0x00002C43},
+{0x00002C14, 0x00002C44}, {0x00002C15, 0x00002C45}, {0x00002C16, 0x00002C46}, {0x00002C17, 0x00002C47},
+{0x00002C18, 0x00002C48}, {0x00002C19, 0x00002C49}, {0x00002C1A, 0x00002C4A}, {0x00002C1B, 0x00002C4B},
+{0x00002C1C, 0x00002C4C}, {0x00002C1D, 0x00002C4D}, {0x00002C1E, 0x00002C4E}, {0x00002C1F, 0x00002C4F},
+{0x00002C20, 0x00002C50}, {0x00002C21, 0x00002C51}, {0x00002C22, 0x00002C52}, {0x00002C23, 0x00002C53},
+{0x00002C24, 0x00002C54}, {0x00002C25, 0x00002C55}, {0x00002C26, 0x00002C56}, {0x00002C27, 0x00002C57},
+{0x00002C28, 0x00002C58}, {0x00002C29, 0x00002C59}, {0x00002C2A, 0x00002C5A}, {0x00002C2B, 0x00002C5B},
+{0x00002C2C, 0x00002C5C}, {0x00002C2D, 0x00002C5D}, {0x00002C2E, 0x00002C5E}, {0x00002C60, 0x00002C61},
+{0x00002C62, 0x0000026B}, {0x00002C63, 0x00001D7D}, {0x00002C64, 0x0000027D}, {0x00002C67, 0x00002C68},
+{0x00002C69, 0x00002C6A}, {0x00002C6B, 0x00002C6C}, {0x00002C6D, 0x00000251}, {0x00002C6E, 0x00000271},
+{0x00002C6F, 0x00000250}, {0x00002C70, 0x00000252}, {0x00002C72, 0x00002C73}, {0x00002C75, 0x00002C76},
+{0x00002C7E, 0x0000023F}, {0x00002C7F, 0x00000240}, {0x00002C80, 0x00002C81}, {0x00002C82, 0x00002C83},
+{0x00002C84, 0x00002C85}, {0x00002C86, 0x00002C87}, {0x00002C88, 0x00002C89}, {0x00002C8A, 0x00002C8B},
+{0x00002C8C, 0x00002C8D}, {0x00002C8E, 0x00002C8F}, {0x00002C90, 0x00002C91}, {0x00002C92, 0x00002C93},
+{0x00002C94, 0x00002C95}, {0x00002C96, 0x00002C97}, {0x00002C98, 0x00002C99}, {0x00002C9A, 0x00002C9B},
+{0x00002C9C, 0x00002C9D}, {0x00002C9E, 0x00002C9F}, {0x00002CA0, 0x00002CA1}, {0x00002CA2, 0x00002CA3},
+{0x00002CA4, 0x00002CA5}, {0x00002CA6, 0x00002CA7}, {0x00002CA8, 0x00002CA9}, {0x00002CAA, 0x00002CAB},
+{0x00002CAC, 0x00002CAD}, {0x00002CAE, 0x00002CAF}, {0x00002CB0, 0x00002CB1}, {0x00002CB2, 0x00002CB3},
+{0x00002CB4, 0x00002CB5}, {0x00002CB6, 0x00002CB7}, {0x00002CB8, 0x00002CB9}, {0x00002CBA, 0x00002CBB},
+{0x00002CBC, 0x00002CBD}, {0x00002CBE, 0x00002CBF}, {0x00002CC0, 0x00002CC1}, {0x00002CC2, 0x00002CC3},
+{0x00002CC4, 0x00002CC5}, {0x00002CC6, 0x00002CC7}, {0x00002CC8, 0x00002CC9}, {0x00002CCA, 0x00002CCB},
+{0x00002CCC, 0x00002CCD}, {0x00002CCE, 0x00002CCF}, {0x00002CD0, 0x00002CD1}, {0x00002CD2, 0x00002CD3},
+{0x00002CD4, 0x00002CD5}, {0x00002CD6, 0x00002CD7}, {0x00002CD8, 0x00002CD9}, {0x00002CDA, 0x00002CDB},
+{0x00002CDC, 0x00002CDD}, {0x00002CDE, 0x00002CDF}, {0x00002CE0, 0x00002CE1}, {0x00002CE2, 0x00002CE3},
+{0x00002CEB, 0x00002CEC}, {0x00002CED, 0x00002CEE}, {0x00002CF2, 0x00002CF3}, {0x0000A640, 0x0000A641},
+{0x0000A642, 0x0000A643}, {0x0000A644, 0x0000A645}, {0x0000A646, 0x0000A647}, {0x0000A648, 0x0000A649},
+{0x0000A64A, 0x0000A64B}, {0x0000A64C, 0x0000A64D}, {0x0000A64E, 0x0000A64F}, {0x0000A650, 0x0000A651},
+{0x0000A652, 0x0000A653}, {0x0000A654, 0x0000A655}, {0x0000A656, 0x0000A657}, {0x0000A658, 0x0000A659},
+{0x0000A65A, 0x0000A65B}, {0x0000A65C, 0x0000A65D}, {0x0000A65E, 0x0000A65F}, {0x0000A660, 0x0000A661},
+{0x0000A662, 0x0000A663}, {0x0000A664, 0x0000A665}, {0x0000A666, 0x0000A667}, {0x0000A668, 0x0000A669},
+{0x0000A66A, 0x0000A66B}, {0x0000A66C, 0x0000A66D}, {0x0000A680, 0x0000A681}, {0x0000A682, 0x0000A683},
+{0x0000A684, 0x0000A685}, {0x0000A686, 0x0000A687}, {0x0000A688, 0x0000A689}, {0x0000A68A, 0x0000A68B},
+{0x0000A68C, 0x0000A68D}, {0x0000A68E, 0x0000A68F}, {0x0000A690, 0x0000A691}, {0x0000A692, 0x0000A693},
+{0x0000A694, 0x0000A695}, {0x0000A696, 0x0000A697}, {0x0000A698, 0x0000A699}, {0x0000A69A, 0x0000A69B},
+{0x0000A722, 0x0000A723}, {0x0000A724, 0x0000A725}, {0x0000A726, 0x0000A727}, {0x0000A728, 0x0000A729},
+{0x0000A72A, 0x0000A72B}, {0x0000A72C, 0x0000A72D}, {0x0000A72E, 0x0000A72F}, {0x0000A732, 0x0000A733},
+{0x0000A734, 0x0000A735}, {0x0000A736, 0x0000A737}, {0x0000A738, 0x0000A739}, {0x0000A73A, 0x0000A73B},
+{0x0000A73C, 0x0000A73D}, {0x0000A73E, 0x0000A73F}, {0x0000A740, 0x0000A741}, {0x0000A742, 0x0000A743},
+{0x0000A744, 0x0000A745}, {0x0000A746, 0x0000A747}, {0x0000A748, 0x0000A749}, {0x0000A74A, 0x0000A74B},
+{0x0000A74C, 0x0000A74D}, {0x0000A74E, 0x0000A74F}, {0x0000A750, 0x0000A751}, {0x0000A752, 0x0000A753},
+{0x0000A754, 0x0000A755}, {0x0000A756, 0x0000A757}, {0x0000A758, 0x0000A759}, {0x0000A75A, 0x0000A75B},
+{0x0000A75C, 0x0000A75D}, {0x0000A75E, 0x0000A75F}, {0x0000A760, 0x0000A761}, {0x0000A762, 0x0000A763},
+{0x0000A764, 0x0000A765}, {0x0000A766, 0x0000A767}, {0x0000A768, 0x0000A769}, {0x0000A76A, 0x0000A76B},
+{0x0000A76C, 0x0000A76D}, {0x0000A76E, 0x0000A76F}, {0x0000A779, 0x0000A77A}, {0x0000A77B, 0x0000A77C},
+{0x0000A77D, 0x00001D79}, {0x0000A77E, 0x0000A77F}, {0x0000A780, 0x0000A781}, {0x0000A782, 0x0000A783},
+{0x0000A784, 0x0000A785}, {0x0000A786, 0x0000A787}, {0x0000A78B, 0x0000A78C}, {0x0000A78D, 0x00000265},
+{0x0000A790, 0x0000A791}, {0x0000A792, 0x0000A793}, {0x0000A796, 0x0000A797}, {0x0000A798, 0x0000A799},
+{0x0000A79A, 0x0000A79B}, {0x0000A79C, 0x0000A79D}, {0x0000A79E, 0x0000A79F}, {0x0000A7A0, 0x0000A7A1},
+{0x0000A7A2, 0x0000A7A3}, {0x0000A7A4, 0x0000A7A5}, {0x0000A7A6, 0x0000A7A7}, {0x0000A7A8, 0x0000A7A9},
+{0x0000A7AA, 0x00000266}, {0x0000A7AB, 0x0000025C}, {0x0000A7AC, 0x00000261}, {0x0000A7AD, 0x0000026C},
+{0x0000A7AE, 0x0000026A}, {0x0000A7B0, 0x0000029E}, {0x0000A7B1, 0x00000287}, {0x0000A7B2, 0x0000029D},
+{0x0000A7B3, 0x0000AB53}, {0x0000A7B4, 0x0000A7B5}, {0x0000A7B6, 0x0000A7B7}, {0x0000A7B8, 0x0000A7B9},
+{0x0000A7BA, 0x0000A7BB}, {0x0000A7BC, 0x0000A7BD}, {0x0000A7BE, 0x0000A7BF}, {0x0000A7C2, 0x0000A7C3},
+{0x0000A7C4, 0x0000A794}, {0x0000A7C5, 0x00000282}, {0x0000A7C6, 0x00001D8E}, {0x0000A7C7, 0x0000A7C8},
+{0x0000A7C9, 0x0000A7CA}, {0x0000A7F5, 0x0000A7F6}, {0x0000FF21, 0x0000FF41}, {0x0000FF22, 0x0000FF42},
+{0x0000FF23, 0x0000FF43}, {0x0000FF24, 0x0000FF44}, {0x0000FF25, 0x0000FF45}, {0x0000FF26, 0x0000FF46},
+{0x0000FF27, 0x0000FF47}, {0x0000FF28, 0x0000FF48}, {0x0000FF29, 0x0000FF49}, {0x0000FF2A, 0x0000FF4A},
+{0x0000FF2B, 0x0000FF4B}, {0x0000FF2C, 0x0000FF4C}, {0x0000FF2D, 0x0000FF4D}, {0x0000FF2E, 0x0000FF4E},
+{0x0000FF2F, 0x0000FF4F}, {0x0000FF30, 0x0000FF50}, {0x0000FF31, 0x0000FF51}, {0x0000FF32, 0x0000FF52},
+{0x0000FF33, 0x0000FF53}, {0x0000FF34, 0x0000FF54}, {0x0000FF35, 0x0000FF55}, {0x0000FF36, 0x0000FF56},
+{0x0000FF37, 0x0000FF57}, {0x0000FF38, 0x0000FF58}, {0x0000FF39, 0x0000FF59}, {0x0000FF3A, 0x0000FF5A},
+{0x00010400, 0x00010428}, {0x00010401, 0x00010429}, {0x00010402, 0x0001042A}, {0x00010403, 0x0001042B},
+{0x00010404, 0x0001042C}, {0x00010405, 0x0001042D}, {0x00010406, 0x0001042E}, {0x00010407, 0x0001042F},
+{0x00010408, 0x00010430}, {0x00010409, 0x00010431}, {0x0001040A, 0x00010432}, {0x0001040B, 0x00010433},
+{0x0001040C, 0x00010434}, {0x0001040D, 0x00010435}, {0x0001040E, 0x00010436}, {0x0001040F, 0x00010437},
+{0x00010410, 0x00010438}, {0x00010411, 0x00010439}, {0x00010412, 0x0001043A}, {0x00010413, 0x0001043B},
+{0x00010414, 0x0001043C}, {0x00010415, 0x0001043D}, {0x00010416, 0x0001043E}, {0x00010417, 0x0001043F},
+{0x00010418, 0x00010440}, {0x00010419, 0x00010441}, {0x0001041A, 0x00010442}, {0x0001041B, 0x00010443},
+{0x0001041C, 0x00010444}, {0x0001041D, 0x00010445}, {0x0001041E, 0x00010446}, {0x0001041F, 0x00010447},
+{0x00010420, 0x00010448}, {0x00010421, 0x00010449}, {0x00010422, 0x0001044A}, {0x00010423, 0x0001044B},
+{0x00010424, 0x0001044C}, {0x00010425, 0x0001044D}, {0x00010426, 0x0001044E}, {0x00010427, 0x0001044F},
+{0x000104B0, 0x000104D8}, {0x000104B1, 0x000104D9}, {0x000104B2, 0x000104DA}, {0x000104B3, 0x000104DB},
+{0x000104B4, 0x000104DC}, {0x000104B5, 0x000104DD}, {0x000104B6, 0x000104DE}, {0x000104B7, 0x000104DF},
+{0x000104B8, 0x000104E0}, {0x000104B9, 0x000104E1}, {0x000104BA, 0x000104E2}, {0x000104BB, 0x000104E3},
+{0x000104BC, 0x000104E4}, {0x000104BD, 0x000104E5}, {0x000104BE, 0x000104E6}, {0x000104BF, 0x000104E7},
+{0x000104C0, 0x000104E8}, {0x000104C1, 0x000104E9}, {0x000104C2, 0x000104EA}, {0x000104C3, 0x000104EB},
+{0x000104C4, 0x000104EC}, {0x000104C5, 0x000104ED}, {0x000104C6, 0x000104EE}, {0x000104C7, 0x000104EF},
+{0x000104C8, 0x000104F0}, {0x000104C9, 0x000104F1}, {0x000104CA, 0x000104F2}, {0x000104CB, 0x000104F3},
+{0x000104CC, 0x000104F4}, {0x000104CD, 0x000104F5}, {0x000104CE, 0x000104F6}, {0x000104CF, 0x000104F7},
+{0x000104D0, 0x000104F8}, {0x000104D1, 0x000104F9}, {0x000104D2, 0x000104FA}, {0x000104D3, 0x000104FB},
+{0x00010C80, 0x00010CC0}, {0x00010C81, 0x00010CC1}, {0x00010C82, 0x00010CC2}, {0x00010C83, 0x00010CC3},
+{0x00010C84, 0x00010CC4}, {0x00010C85, 0x00010CC5}, {0x00010C86, 0x00010CC6}, {0x00010C87, 0x00010CC7},
+{0x00010C88, 0x00010CC8}, {0x00010C89, 0x00010CC9}, {0x00010C8A, 0x00010CCA}, {0x00010C8B, 0x00010CCB},
+{0x00010C8C, 0x00010CCC}, {0x00010C8D, 0x00010CCD}, {0x00010C8E, 0x00010CCE}, {0x00010C8F, 0x00010CCF},
+{0x00010C90, 0x00010CD0}, {0x00010C91, 0x00010CD1}, {0x00010C92, 0x00010CD2}, {0x00010C93, 0x00010CD3},
+{0x00010C94, 0x00010CD4}, {0x00010C95, 0x00010CD5}, {0x00010C96, 0x00010CD6}, {0x00010C97, 0x00010CD7},
+{0x00010C98, 0x00010CD8}, {0x00010C99, 0x00010CD9}, {0x00010C9A, 0x00010CDA}, {0x00010C9B, 0x00010CDB},
+{0x00010C9C, 0x00010CDC}, {0x00010C9D, 0x00010CDD}, {0x00010C9E, 0x00010CDE}, {0x00010C9F, 0x00010CDF},
+{0x00010CA0, 0x00010CE0}, {0x00010CA1, 0x00010CE1}, {0x00010CA2, 0x00010CE2}, {0x00010CA3, 0x00010CE3},
+{0x00010CA4, 0x00010CE4}, {0x00010CA5, 0x00010CE5}, {0x00010CA6, 0x00010CE6}, {0x00010CA7, 0x00010CE7},
+{0x00010CA8, 0x00010CE8}, {0x00010CA9, 0x00010CE9}, {0x00010CAA, 0x00010CEA}, {0x00010CAB, 0x00010CEB},
+{0x00010CAC, 0x00010CEC}, {0x00010CAD, 0x00010CED}, {0x00010CAE, 0x00010CEE}, {0x00010CAF, 0x00010CEF},
+{0x00010CB0, 0x00010CF0}, {0x00010CB1, 0x00010CF1}, {0x00010CB2, 0x00010CF2}, {0x000118A0, 0x000118C0},
+{0x000118A1, 0x000118C1}, {0x000118A2, 0x000118C2}, {0x000118A3, 0x000118C3}, {0x000118A4, 0x000118C4},
+{0x000118A5, 0x000118C5}, {0x000118A6, 0x000118C6}, {0x000118A7, 0x000118C7}, {0x000118A8, 0x000118C8},
+{0x000118A9, 0x000118C9}, {0x000118AA, 0x000118CA}, {0x000118AB, 0x000118CB}, {0x000118AC, 0x000118CC},
+{0x000118AD, 0x000118CD}, {0x000118AE, 0x000118CE}, {0x000118AF, 0x000118CF}, {0x000118B0, 0x000118D0},
+{0x000118B1, 0x000118D1}, {0x000118B2, 0x000118D2}, {0x000118B3, 0x000118D3}, {0x000118B4, 0x000118D4},
+{0x000118B5, 0x000118D5}, {0x000118B6, 0x000118D6}, {0x000118B7, 0x000118D7}, {0x000118B8, 0x000118D8},
+{0x000118B9, 0x000118D9}, {0x000118BA, 0x000118DA}, {0x000118BB, 0x000118DB}, {0x000118BC, 0x000118DC},
+{0x000118BD, 0x000118DD}, {0x000118BE, 0x000118DE}, {0x000118BF, 0x000118DF}, {0x00016E40, 0x00016E60},
+{0x00016E41, 0x00016E61}, {0x00016E42, 0x00016E62}, {0x00016E43, 0x00016E63}, {0x00016E44, 0x00016E64},
+{0x00016E45, 0x00016E65}, {0x00016E46, 0x00016E66}, {0x00016E47, 0x00016E67}, {0x00016E48, 0x00016E68},
+{0x00016E49, 0x00016E69}, {0x00016E4A, 0x00016E6A}, {0x00016E4B, 0x00016E6B}, {0x00016E4C, 0x00016E6C},
+{0x00016E4D, 0x00016E6D}, {0x00016E4E, 0x00016E6E}, {0x00016E4F, 0x00016E6F}, {0x00016E50, 0x00016E70},
+{0x00016E51, 0x00016E71}, {0x00016E52, 0x00016E72}, {0x00016E53, 0x00016E73}, {0x00016E54, 0x00016E74},
+{0x00016E55, 0x00016E75}, {0x00016E56, 0x00016E76}, {0x00016E57, 0x00016E77}, {0x00016E58, 0x00016E78},
+{0x00016E59, 0x00016E79}, {0x00016E5A, 0x00016E7A}, {0x00016E5B, 0x00016E7B}, {0x00016E5C, 0x00016E7C},
+{0x00016E5D, 0x00016E7D}, {0x00016E5E, 0x00016E7E}, {0x00016E5F, 0x00016E7F}, {0x0001E900, 0x0001E922},
+{0x0001E901, 0x0001E923}, {0x0001E902, 0x0001E924}, {0x0001E903, 0x0001E925}, {0x0001E904, 0x0001E926},
+{0x0001E905, 0x0001E927}, {0x0001E906, 0x0001E928}, {0x0001E907, 0x0001E929}, {0x0001E908, 0x0001E92A},
+{0x0001E909, 0x0001E92B}, {0x0001E90A, 0x0001E92C}, {0x0001E90B, 0x0001E92D}, {0x0001E90C, 0x0001E92E},
+{0x0001E90D, 0x0001E92F}, {0x0001E90E, 0x0001E930}, {0x0001E90F, 0x0001E931}, {0x0001E910, 0x0001E932},
+{0x0001E911, 0x0001E933}, {0x0001E912, 0x0001E934}, {0x0001E913, 0x0001E935}, {0x0001E914, 0x0001E936},
+{0x0001E915, 0x0001E937}, {0x0001E916, 0x0001E938}, {0x0001E917, 0x0001E939}, {0x0001E918, 0x0001E93A},
+{0x0001E919, 0x0001E93B}, {0x0001E91A, 0x0001E93C}, {0x0001E91B, 0x0001E93D}, {0x0001E91C, 0x0001E93E},
+{0x0001E91D, 0x0001E93F}, {0x0001E91E, 0x0001E940}, {0x0001E91F, 0x0001E941}, {0x0001E920, 0x0001E942},
+{0x0001E921, 0x0001E943},
+};
+
+const std::map<uint32_t, uint32_t> unicode_map_uppercase = {
+{0x00000061, 0x00000041}, {0x00000062, 0x00000042}, {0x00000063, 0x00000043}, {0x00000064, 0x00000044},
+{0x00000065, 0x00000045}, {0x00000066, 0x00000046}, {0x00000067, 0x00000047}, {0x00000068, 0x00000048},
+{0x00000069, 0x00000049}, {0x0000006A, 0x0000004A}, {0x0000006B, 0x0000004B}, {0x0000006C, 0x0000004C},
+{0x0000006D, 0x0000004D}, {0x0000006E, 0x0000004E}, {0x0000006F, 0x0000004F}, {0x00000070, 0x00000050},
+{0x00000071, 0x00000051}, {0x00000072, 0x00000052}, {0x00000073, 0x00000053}, {0x00000074, 0x00000054},
+{0x00000075, 0x00000055}, {0x00000076, 0x00000056}, {0x00000077, 0x00000057}, {0x00000078, 0x00000058},
+{0x00000079, 0x00000059}, {0x0000007A, 0x0000005A}, {0x000000B5, 0x0000039C}, {0x000000DF, 0x00000053},
+{0x000000E0, 0x000000C0}, {0x000000E1, 0x000000C1}, {0x000000E2, 0x000000C2}, {0x000000E3, 0x000000C3},
+{0x000000E4, 0x000000C4}, {0x000000E5, 0x000000C5}, {0x000000E6, 0x000000C6}, {0x000000E7, 0x000000C7},
+{0x000000E8, 0x000000C8}, {0x000000E9, 0x000000C9}, {0x000000EA, 0x000000CA}, {0x000000EB, 0x000000CB},
+{0x000000EC, 0x000000CC}, {0x000000ED, 0x000000CD}, {0x000000EE, 0x000000CE}, {0x000000EF, 0x000000CF},
+{0x000000F0, 0x000000D0}, {0x000000F1, 0x000000D1}, {0x000000F2, 0x000000D2}, {0x000000F3, 0x000000D3},
+{0x000000F4, 0x000000D4}, {0x000000F5, 0x000000D5}, {0x000000F6, 0x000000D6}, {0x000000F8, 0x000000D8},
+{0x000000F9, 0x000000D9}, {0x000000FA, 0x000000DA}, {0x000000FB, 0x000000DB}, {0x000000FC, 0x000000DC},
+{0x000000FD, 0x000000DD}, {0x000000FE, 0x000000DE}, {0x000000FF, 0x00000178}, {0x00000101, 0x00000100},
+{0x00000103, 0x00000102}, {0x00000105, 0x00000104}, {0x00000107, 0x00000106}, {0x00000109, 0x00000108},
+{0x0000010B, 0x0000010A}, {0x0000010D, 0x0000010C}, {0x0000010F, 0x0000010E}, {0x00000111, 0x00000110},
+{0x00000113, 0x00000112}, {0x00000115, 0x00000114}, {0x00000117, 0x00000116}, {0x00000119, 0x00000118},
+{0x0000011B, 0x0000011A}, {0x0000011D, 0x0000011C}, {0x0000011F, 0x0000011E}, {0x00000121, 0x00000120},
+{0x00000123, 0x00000122}, {0x00000125, 0x00000124}, {0x00000127, 0x00000126}, {0x00000129, 0x00000128},
+{0x0000012B, 0x0000012A}, {0x0000012D, 0x0000012C}, {0x0000012F, 0x0000012E}, {0x00000131, 0x00000049},
+{0x00000133, 0x00000132}, {0x00000135, 0x00000134}, {0x00000137, 0x00000136}, {0x0000013A, 0x00000139},
+{0x0000013C, 0x0000013B}, {0x0000013E, 0x0000013D}, {0x00000140, 0x0000013F}, {0x00000142, 0x00000141},
+{0x00000144, 0x00000143}, {0x00000146, 0x00000145}, {0x00000148, 0x00000147}, {0x00000149, 0x000002BC},
+{0x0000014B, 0x0000014A}, {0x0000014D, 0x0000014C}, {0x0000014F, 0x0000014E}, {0x00000151, 0x00000150},
+{0x00000153, 0x00000152}, {0x00000155, 0x00000154}, {0x00000157, 0x00000156}, {0x00000159, 0x00000158},
+{0x0000015B, 0x0000015A}, {0x0000015D, 0x0000015C}, {0x0000015F, 0x0000015E}, {0x00000161, 0x00000160},
+{0x00000163, 0x00000162}, {0x00000165, 0x00000164}, {0x00000167, 0x00000166}, {0x00000169, 0x00000168},
+{0x0000016B, 0x0000016A}, {0x0000016D, 0x0000016C}, {0x0000016F, 0x0000016E}, {0x00000171, 0x00000170},
+{0x00000173, 0x00000172}, {0x00000175, 0x00000174}, {0x00000177, 0x00000176}, {0x0000017A, 0x00000179},
+{0x0000017C, 0x0000017B}, {0x0000017E, 0x0000017D}, {0x0000017F, 0x00000053}, {0x00000180, 0x00000243},
+{0x00000183, 0x00000182}, {0x00000185, 0x00000184}, {0x00000188, 0x00000187}, {0x0000018C, 0x0000018B},
+{0x00000192, 0x00000191}, {0x00000195, 0x000001F6}, {0x00000199, 0x00000198}, {0x0000019A, 0x0000023D},
+{0x0000019E, 0x00000220}, {0x000001A1, 0x000001A0}, {0x000001A3, 0x000001A2}, {0x000001A5, 0x000001A4},
+{0x000001A8, 0x000001A7}, {0x000001AD, 0x000001AC}, {0x000001B0, 0x000001AF}, {0x000001B4, 0x000001B3},
+{0x000001B6, 0x000001B5}, {0x000001B9, 0x000001B8}, {0x000001BD, 0x000001BC}, {0x000001BF, 0x000001F7},
+{0x000001C5, 0x000001C4}, {0x000001C6, 0x000001C4}, {0x000001C8, 0x000001C7}, {0x000001C9, 0x000001C7},
+{0x000001CB, 0x000001CA}, {0x000001CC, 0x000001CA}, {0x000001CE, 0x000001CD}, {0x000001D0, 0x000001CF},
+{0x000001D2, 0x000001D1}, {0x000001D4, 0x000001D3}, {0x000001D6, 0x000001D5}, {0x000001D8, 0x000001D7},
+{0x000001DA, 0x000001D9}, {0x000001DC, 0x000001DB}, {0x000001DD, 0x0000018E}, {0x000001DF, 0x000001DE},
+{0x000001E1, 0x000001E0}, {0x000001E3, 0x000001E2}, {0x000001E5, 0x000001E4}, {0x000001E7, 0x000001E6},
+{0x000001E9, 0x000001E8}, {0x000001EB, 0x000001EA}, {0x000001ED, 0x000001EC}, {0x000001EF, 0x000001EE},
+{0x000001F0, 0x0000004A}, {0x000001F2, 0x000001F1}, {0x000001F3, 0x000001F1}, {0x000001F5, 0x000001F4},
+{0x000001F9, 0x000001F8}, {0x000001FB, 0x000001FA}, {0x000001FD, 0x000001FC}, {0x000001FF, 0x000001FE},
+{0x00000201, 0x00000200}, {0x00000203, 0x00000202}, {0x00000205, 0x00000204}, {0x00000207, 0x00000206},
+{0x00000209, 0x00000208}, {0x0000020B, 0x0000020A}, {0x0000020D, 0x0000020C}, {0x0000020F, 0x0000020E},
+{0x00000211, 0x00000210}, {0x00000213, 0x00000212}, {0x00000215, 0x00000214}, {0x00000217, 0x00000216},
+{0x00000219, 0x00000218}, {0x0000021B, 0x0000021A}, {0x0000021D, 0x0000021C}, {0x0000021F, 0x0000021E},
+{0x00000223, 0x00000222}, {0x00000225, 0x00000224}, {0x00000227, 0x00000226}, {0x00000229, 0x00000228},
+{0x0000022B, 0x0000022A}, {0x0000022D, 0x0000022C}, {0x0000022F, 0x0000022E}, {0x00000231, 0x00000230},
+{0x00000233, 0x00000232}, {0x0000023C, 0x0000023B}, {0x0000023F, 0x00002C7E}, {0x00000240, 0x00002C7F},
+{0x00000242, 0x00000241}, {0x00000247, 0x00000246}, {0x00000249, 0x00000248}, {0x0000024B, 0x0000024A},
+{0x0000024D, 0x0000024C}, {0x0000024F, 0x0000024E}, {0x00000250, 0x00002C6F}, {0x00000251, 0x00002C6D},
+{0x00000252, 0x00002C70}, {0x00000253, 0x00000181}, {0x00000254, 0x00000186}, {0x00000256, 0x00000189},
+{0x00000257, 0x0000018A}, {0x00000259, 0x0000018F}, {0x0000025B, 0x00000190}, {0x0000025C, 0x0000A7AB},
+{0x00000260, 0x00000193}, {0x00000261, 0x0000A7AC}, {0x00000263, 0x00000194}, {0x00000265, 0x0000A78D},
+{0x00000266, 0x0000A7AA}, {0x00000268, 0x00000197}, {0x00000269, 0x00000196}, {0x0000026A, 0x0000A7AE},
+{0x0000026B, 0x00002C62}, {0x0000026C, 0x0000A7AD}, {0x0000026F, 0x0000019C}, {0x00000271, 0x00002C6E},
+{0x00000272, 0x0000019D}, {0x00000275, 0x0000019F}, {0x0000027D, 0x00002C64}, {0x00000280, 0x000001A6},
+{0x00000282, 0x0000A7C5}, {0x00000283, 0x000001A9}, {0x00000287, 0x0000A7B1}, {0x00000288, 0x000001AE},
+{0x00000289, 0x00000244}, {0x0000028A, 0x000001B1}, {0x0000028B, 0x000001B2}, {0x0000028C, 0x00000245},
+{0x00000292, 0x000001B7}, {0x0000029D, 0x0000A7B2}, {0x0000029E, 0x0000A7B0}, {0x00000345, 0x00000399},
+{0x00000371, 0x00000370}, {0x00000373, 0x00000372}, {0x00000377, 0x00000376}, {0x0000037B, 0x000003FD},
+{0x0000037C, 0x000003FE}, {0x0000037D, 0x000003FF}, {0x00000390, 0x00000399}, {0x000003AC, 0x00000386},
+{0x000003AD, 0x00000388}, {0x000003AE, 0x00000389}, {0x000003AF, 0x0000038A}, {0x000003B0, 0x000003A5},
+{0x000003B1, 0x00000391}, {0x000003B2, 0x00000392}, {0x000003B3, 0x00000393}, {0x000003B4, 0x00000394},
+{0x000003B5, 0x00000395}, {0x000003B6, 0x00000396}, {0x000003B7, 0x00000397}, {0x000003B8, 0x00000398},
+{0x000003B9, 0x00000399}, {0x000003BA, 0x0000039A}, {0x000003BB, 0x0000039B}, {0x000003BC, 0x0000039C},
+{0x000003BD, 0x0000039D}, {0x000003BE, 0x0000039E}, {0x000003BF, 0x0000039F}, {0x000003C0, 0x000003A0},
+{0x000003C1, 0x000003A1}, {0x000003C2, 0x000003A3}, {0x000003C3, 0x000003A3}, {0x000003C4, 0x000003A4},
+{0x000003C5, 0x000003A5}, {0x000003C6, 0x000003A6}, {0x000003C7, 0x000003A7}, {0x000003C8, 0x000003A8},
+{0x000003C9, 0x000003A9}, {0x000003CA, 0x000003AA}, {0x000003CB, 0x000003AB}, {0x000003CC, 0x0000038C},
+{0x000003CD, 0x0000038E}, {0x000003CE, 0x0000038F}, {0x000003D0, 0x00000392}, {0x000003D1, 0x00000398},
+{0x000003D5, 0x000003A6}, {0x000003D6, 0x000003A0}, {0x000003D7, 0x000003CF}, {0x000003D9, 0x000003D8},
+{0x000003DB, 0x000003DA}, {0x000003DD, 0x000003DC}, {0x000003DF, 0x000003DE}, {0x000003E1, 0x000003E0},
+{0x000003E3, 0x000003E2}, {0x000003E5, 0x000003E4}, {0x000003E7, 0x000003E6}, {0x000003E9, 0x000003E8},
+{0x000003EB, 0x000003EA}, {0x000003ED, 0x000003EC}, {0x000003EF, 0x000003EE}, {0x000003F0, 0x0000039A},
+{0x000003F1, 0x000003A1}, {0x000003F2, 0x000003F9}, {0x000003F3, 0x0000037F}, {0x000003F5, 0x00000395},
+{0x000003F8, 0x000003F7}, {0x000003FB, 0x000003FA}, {0x00000430, 0x00000410}, {0x00000431, 0x00000411},
+{0x00000432, 0x00000412}, {0x00000433, 0x00000413}, {0x00000434, 0x00000414}, {0x00000435, 0x00000415},
+{0x00000436, 0x00000416}, {0x00000437, 0x00000417}, {0x00000438, 0x00000418}, {0x00000439, 0x00000419},
+{0x0000043A, 0x0000041A}, {0x0000043B, 0x0000041B}, {0x0000043C, 0x0000041C}, {0x0000043D, 0x0000041D},
+{0x0000043E, 0x0000041E}, {0x0000043F, 0x0000041F}, {0x00000440, 0x00000420}, {0x00000441, 0x00000421},
+{0x00000442, 0x00000422}, {0x00000443, 0x00000423}, {0x00000444, 0x00000424}, {0x00000445, 0x00000425},
+{0x00000446, 0x00000426}, {0x00000447, 0x00000427}, {0x00000448, 0x00000428}, {0x00000449, 0x00000429},
+{0x0000044A, 0x0000042A}, {0x0000044B, 0x0000042B}, {0x0000044C, 0x0000042C}, {0x0000044D, 0x0000042D},
+{0x0000044E, 0x0000042E}, {0x0000044F, 0x0000042F}, {0x00000450, 0x00000400}, {0x00000451, 0x00000401},
+{0x00000452, 0x00000402}, {0x00000453, 0x00000403}, {0x00000454, 0x00000404}, {0x00000455, 0x00000405},
+{0x00000456, 0x00000406}, {0x00000457, 0x00000407}, {0x00000458, 0x00000408}, {0x00000459, 0x00000409},
+{0x0000045A, 0x0000040A}, {0x0000045B, 0x0000040B}, {0x0000045C, 0x0000040C}, {0x0000045D, 0x0000040D},
+{0x0000045E, 0x0000040E}, {0x0000045F, 0x0000040F}, {0x00000461, 0x00000460}, {0x00000463, 0x00000462},
+{0x00000465, 0x00000464}, {0x00000467, 0x00000466}, {0x00000469, 0x00000468}, {0x0000046B, 0x0000046A},
+{0x0000046D, 0x0000046C}, {0x0000046F, 0x0000046E}, {0x00000471, 0x00000470}, {0x00000473, 0x00000472},
+{0x00000475, 0x00000474}, {0x00000477, 0x00000476}, {0x00000479, 0x00000478}, {0x0000047B, 0x0000047A},
+{0x0000047D, 0x0000047C}, {0x0000047F, 0x0000047E}, {0x00000481, 0x00000480}, {0x0000048B, 0x0000048A},
+{0x0000048D, 0x0000048C}, {0x0000048F, 0x0000048E}, {0x00000491, 0x00000490}, {0x00000493, 0x00000492},
+{0x00000495, 0x00000494}, {0x00000497, 0x00000496}, {0x00000499, 0x00000498}, {0x0000049B, 0x0000049A},
+{0x0000049D, 0x0000049C}, {0x0000049F, 0x0000049E}, {0x000004A1, 0x000004A0}, {0x000004A3, 0x000004A2},
+{0x000004A5, 0x000004A4}, {0x000004A7, 0x000004A6}, {0x000004A9, 0x000004A8}, {0x000004AB, 0x000004AA},
+{0x000004AD, 0x000004AC}, {0x000004AF, 0x000004AE}, {0x000004B1, 0x000004B0}, {0x000004B3, 0x000004B2},
+{0x000004B5, 0x000004B4}, {0x000004B7, 0x000004B6}, {0x000004B9, 0x000004B8}, {0x000004BB, 0x000004BA},
+{0x000004BD, 0x000004BC}, {0x000004BF, 0x000004BE}, {0x000004C2, 0x000004C1}, {0x000004C4, 0x000004C3},
+{0x000004C6, 0x000004C5}, {0x000004C8, 0x000004C7}, {0x000004CA, 0x000004C9}, {0x000004CC, 0x000004CB},
+{0x000004CE, 0x000004CD}, {0x000004CF, 0x000004C0}, {0x000004D1, 0x000004D0}, {0x000004D3, 0x000004D2},
+{0x000004D5, 0x000004D4}, {0x000004D7, 0x000004D6}, {0x000004D9, 0x000004D8}, {0x000004DB, 0x000004DA},
+{0x000004DD, 0x000004DC}, {0x000004DF, 0x000004DE}, {0x000004E1, 0x000004E0}, {0x000004E3, 0x000004E2},
+{0x000004E5, 0x000004E4}, {0x000004E7, 0x000004E6}, {0x000004E9, 0x000004E8}, {0x000004EB, 0x000004EA},
+{0x000004ED, 0x000004EC}, {0x000004EF, 0x000004EE}, {0x000004F1, 0x000004F0}, {0x000004F3, 0x000004F2},
+{0x000004F5, 0x000004F4}, {0x000004F7, 0x000004F6}, {0x000004F9, 0x000004F8}, {0x000004FB, 0x000004FA},
+{0x000004FD, 0x000004FC}, {0x000004FF, 0x000004FE}, {0x00000501, 0x00000500}, {0x00000503, 0x00000502},
+{0x00000505, 0x00000504}, {0x00000507, 0x00000506}, {0x00000509, 0x00000508}, {0x0000050B, 0x0000050A},
+{0x0000050D, 0x0000050C}, {0x0000050F, 0x0000050E}, {0x00000511, 0x00000510}, {0x00000513, 0x00000512},
+{0x00000515, 0x00000514}, {0x00000517, 0x00000516}, {0x00000519, 0x00000518}, {0x0000051B, 0x0000051A},
+{0x0000051D, 0x0000051C}, {0x0000051F, 0x0000051E}, {0x00000521, 0x00000520}, {0x00000523, 0x00000522},
+{0x00000525, 0x00000524}, {0x00000527, 0x00000526}, {0x00000529, 0x00000528}, {0x0000052B, 0x0000052A},
+{0x0000052D, 0x0000052C}, {0x0000052F, 0x0000052E}, {0x00000561, 0x00000531}, {0x00000562, 0x00000532},
+{0x00000563, 0x00000533}, {0x00000564, 0x00000534}, {0x00000565, 0x00000535}, {0x00000566, 0x00000536},
+{0x00000567, 0x00000537}, {0x00000568, 0x00000538}, {0x00000569, 0x00000539}, {0x0000056A, 0x0000053A},
+{0x0000056B, 0x0000053B}, {0x0000056C, 0x0000053C}, {0x0000056D, 0x0000053D}, {0x0000056E, 0x0000053E},
+{0x0000056F, 0x0000053F}, {0x00000570, 0x00000540}, {0x00000571, 0x00000541}, {0x00000572, 0x00000542},
+{0x00000573, 0x00000543}, {0x00000574, 0x00000544}, {0x00000575, 0x00000545}, {0x00000576, 0x00000546},
+{0x00000577, 0x00000547}, {0x00000578, 0x00000548}, {0x00000579, 0x00000549}, {0x0000057A, 0x0000054A},
+{0x0000057B, 0x0000054B}, {0x0000057C, 0x0000054C}, {0x0000057D, 0x0000054D}, {0x0000057E, 0x0000054E},
+{0x0000057F, 0x0000054F}, {0x00000580, 0x00000550}, {0x00000581, 0x00000551}, {0x00000582, 0x00000552},
+{0x00000583, 0x00000553}, {0x00000584, 0x00000554}, {0x00000585, 0x00000555}, {0x00000586, 0x00000556},
+{0x00000587, 0x00000535}, {0x000010D0, 0x00001C90}, {0x000010D1, 0x00001C91}, {0x000010D2, 0x00001C92},
+{0x000010D3, 0x00001C93}, {0x000010D4, 0x00001C94}, {0x000010D5, 0x00001C95}, {0x000010D6, 0x00001C96},
+{0x000010D7, 0x00001C97}, {0x000010D8, 0x00001C98}, {0x000010D9, 0x00001C99}, {0x000010DA, 0x00001C9A},
+{0x000010DB, 0x00001C9B}, {0x000010DC, 0x00001C9C}, {0x000010DD, 0x00001C9D}, {0x000010DE, 0x00001C9E},
+{0x000010DF, 0x00001C9F}, {0x000010E0, 0x00001CA0}, {0x000010E1, 0x00001CA1}, {0x000010E2, 0x00001CA2},
+{0x000010E3, 0x00001CA3}, {0x000010E4, 0x00001CA4}, {0x000010E5, 0x00001CA5}, {0x000010E6, 0x00001CA6},
+{0x000010E7, 0x00001CA7}, {0x000010E8, 0x00001CA8}, {0x000010E9, 0x00001CA9}, {0x000010EA, 0x00001CAA},
+{0x000010EB, 0x00001CAB}, {0x000010EC, 0x00001CAC}, {0x000010ED, 0x00001CAD}, {0x000010EE, 0x00001CAE},
+{0x000010EF, 0x00001CAF}, {0x000010F0, 0x00001CB0}, {0x000010F1, 0x00001CB1}, {0x000010F2, 0x00001CB2},
+{0x000010F3, 0x00001CB3}, {0x000010F4, 0x00001CB4}, {0x000010F5, 0x00001CB5}, {0x000010F6, 0x00001CB6},
+{0x000010F7, 0x00001CB7}, {0x000010F8, 0x00001CB8}, {0x000010F9, 0x00001CB9}, {0x000010FA, 0x00001CBA},
+{0x000010FD, 0x00001CBD}, {0x000010FE, 0x00001CBE}, {0x000010FF, 0x00001CBF}, {0x000013F8, 0x000013F0},
+{0x000013F9, 0x000013F1}, {0x000013FA, 0x000013F2}, {0x000013FB, 0x000013F3}, {0x000013FC, 0x000013F4},
+{0x000013FD, 0x000013F5}, {0x00001C80, 0x00000412}, {0x00001C81, 0x00000414}, {0x00001C82, 0x0000041E},
+{0x00001C83, 0x00000421}, {0x00001C84, 0x00000422}, {0x00001C85, 0x00000422}, {0x00001C86, 0x0000042A},
+{0x00001C87, 0x00000462}, {0x00001C88, 0x0000A64A}, {0x00001D79, 0x0000A77D}, {0x00001D7D, 0x00002C63},
+{0x00001D8E, 0x0000A7C6}, {0x00001E01, 0x00001E00}, {0x00001E03, 0x00001E02}, {0x00001E05, 0x00001E04},
+{0x00001E07, 0x00001E06}, {0x00001E09, 0x00001E08}, {0x00001E0B, 0x00001E0A}, {0x00001E0D, 0x00001E0C},
+{0x00001E0F, 0x00001E0E}, {0x00001E11, 0x00001E10}, {0x00001E13, 0x00001E12}, {0x00001E15, 0x00001E14},
+{0x00001E17, 0x00001E16}, {0x00001E19, 0x00001E18}, {0x00001E1B, 0x00001E1A}, {0x00001E1D, 0x00001E1C},
+{0x00001E1F, 0x00001E1E}, {0x00001E21, 0x00001E20}, {0x00001E23, 0x00001E22}, {0x00001E25, 0x00001E24},
+{0x00001E27, 0x00001E26}, {0x00001E29, 0x00001E28}, {0x00001E2B, 0x00001E2A}, {0x00001E2D, 0x00001E2C},
+{0x00001E2F, 0x00001E2E}, {0x00001E31, 0x00001E30}, {0x00001E33, 0x00001E32}, {0x00001E35, 0x00001E34},
+{0x00001E37, 0x00001E36}, {0x00001E39, 0x00001E38}, {0x00001E3B, 0x00001E3A}, {0x00001E3D, 0x00001E3C},
+{0x00001E3F, 0x00001E3E}, {0x00001E41, 0x00001E40}, {0x00001E43, 0x00001E42}, {0x00001E45, 0x00001E44},
+{0x00001E47, 0x00001E46}, {0x00001E49, 0x00001E48}, {0x00001E4B, 0x00001E4A}, {0x00001E4D, 0x00001E4C},
+{0x00001E4F, 0x00001E4E}, {0x00001E51, 0x00001E50}, {0x00001E53, 0x00001E52}, {0x00001E55, 0x00001E54},
+{0x00001E57, 0x00001E56}, {0x00001E59, 0x00001E58}, {0x00001E5B, 0x00001E5A}, {0x00001E5D, 0x00001E5C},
+{0x00001E5F, 0x00001E5E}, {0x00001E61, 0x00001E60}, {0x00001E63, 0x00001E62}, {0x00001E65, 0x00001E64},
+{0x00001E67, 0x00001E66}, {0x00001E69, 0x00001E68}, {0x00001E6B, 0x00001E6A}, {0x00001E6D, 0x00001E6C},
+{0x00001E6F, 0x00001E6E}, {0x00001E71, 0x00001E70}, {0x00001E73, 0x00001E72}, {0x00001E75, 0x00001E74},
+{0x00001E77, 0x00001E76}, {0x00001E79, 0x00001E78}, {0x00001E7B, 0x00001E7A}, {0x00001E7D, 0x00001E7C},
+{0x00001E7F, 0x00001E7E}, {0x00001E81, 0x00001E80}, {0x00001E83, 0x00001E82}, {0x00001E85, 0x00001E84},
+{0x00001E87, 0x00001E86}, {0x00001E89, 0x00001E88}, {0x00001E8B, 0x00001E8A}, {0x00001E8D, 0x00001E8C},
+{0x00001E8F, 0x00001E8E}, {0x00001E91, 0x00001E90}, {0x00001E93, 0x00001E92}, {0x00001E95, 0x00001E94},
+{0x00001E96, 0x00000048}, {0x00001E97, 0x00000054}, {0x00001E98, 0x00000057}, {0x00001E99, 0x00000059},
+{0x00001E9A, 0x00000041}, {0x00001E9B, 0x00001E60}, {0x00001EA1, 0x00001EA0}, {0x00001EA3, 0x00001EA2},
+{0x00001EA5, 0x00001EA4}, {0x00001EA7, 0x00001EA6}, {0x00001EA9, 0x00001EA8}, {0x00001EAB, 0x00001EAA},
+{0x00001EAD, 0x00001EAC}, {0x00001EAF, 0x00001EAE}, {0x00001EB1, 0x00001EB0}, {0x00001EB3, 0x00001EB2},
+{0x00001EB5, 0x00001EB4}, {0x00001EB7, 0x00001EB6}, {0x00001EB9, 0x00001EB8}, {0x00001EBB, 0x00001EBA},
+{0x00001EBD, 0x00001EBC}, {0x00001EBF, 0x00001EBE}, {0x00001EC1, 0x00001EC0}, {0x00001EC3, 0x00001EC2},
+{0x00001EC5, 0x00001EC4}, {0x00001EC7, 0x00001EC6}, {0x00001EC9, 0x00001EC8}, {0x00001ECB, 0x00001ECA},
+{0x00001ECD, 0x00001ECC}, {0x00001ECF, 0x00001ECE}, {0x00001ED1, 0x00001ED0}, {0x00001ED3, 0x00001ED2},
+{0x00001ED5, 0x00001ED4}, {0x00001ED7, 0x00001ED6}, {0x00001ED9, 0x00001ED8}, {0x00001EDB, 0x00001EDA},
+{0x00001EDD, 0x00001EDC}, {0x00001EDF, 0x00001EDE}, {0x00001EE1, 0x00001EE0}, {0x00001EE3, 0x00001EE2},
+{0x00001EE5, 0x00001EE4}, {0x00001EE7, 0x00001EE6}, {0x00001EE9, 0x00001EE8}, {0x00001EEB, 0x00001EEA},
+{0x00001EED, 0x00001EEC}, {0x00001EEF, 0x00001EEE}, {0x00001EF1, 0x00001EF0}, {0x00001EF3, 0x00001EF2},
+{0x00001EF5, 0x00001EF4}, {0x00001EF7, 0x00001EF6}, {0x00001EF9, 0x00001EF8}, {0x00001EFB, 0x00001EFA},
+{0x00001EFD, 0x00001EFC}, {0x00001EFF, 0x00001EFE}, {0x00001F00, 0x00001F08}, {0x00001F01, 0x00001F09},
+{0x00001F02, 0x00001F0A}, {0x00001F03, 0x00001F0B}, {0x00001F04, 0x00001F0C}, {0x00001F05, 0x00001F0D},
+{0x00001F06, 0x00001F0E}, {0x00001F07, 0x00001F0F}, {0x00001F10, 0x00001F18}, {0x00001F11, 0x00001F19},
+{0x00001F12, 0x00001F1A}, {0x00001F13, 0x00001F1B}, {0x00001F14, 0x00001F1C}, {0x00001F15, 0x00001F1D},
+{0x00001F20, 0x00001F28}, {0x00001F21, 0x00001F29}, {0x00001F22, 0x00001F2A}, {0x00001F23, 0x00001F2B},
+{0x00001F24, 0x00001F2C}, {0x00001F25, 0x00001F2D}, {0x00001F26, 0x00001F2E}, {0x00001F27, 0x00001F2F},
+{0x00001F30, 0x00001F38}, {0x00001F31, 0x00001F39}, {0x00001F32, 0x00001F3A}, {0x00001F33, 0x00001F3B},
+{0x00001F34, 0x00001F3C}, {0x00001F35, 0x00001F3D}, {0x00001F36, 0x00001F3E}, {0x00001F37, 0x00001F3F},
+{0x00001F40, 0x00001F48}, {0x00001F41, 0x00001F49}, {0x00001F42, 0x00001F4A}, {0x00001F43, 0x00001F4B},
+{0x00001F44, 0x00001F4C}, {0x00001F45, 0x00001F4D}, {0x00001F50, 0x000003A5}, {0x00001F51, 0x00001F59},
+{0x00001F52, 0x000003A5}, {0x00001F53, 0x00001F5B}, {0x00001F54, 0x000003A5}, {0x00001F55, 0x00001F5D},
+{0x00001F56, 0x000003A5}, {0x00001F57, 0x00001F5F}, {0x00001F60, 0x00001F68}, {0x00001F61, 0x00001F69},
+{0x00001F62, 0x00001F6A}, {0x00001F63, 0x00001F6B}, {0x00001F64, 0x00001F6C}, {0x00001F65, 0x00001F6D},
+{0x00001F66, 0x00001F6E}, {0x00001F67, 0x00001F6F}, {0x00001F70, 0x00001FBA}, {0x00001F71, 0x00001FBB},
+{0x00001F72, 0x00001FC8}, {0x00001F73, 0x00001FC9}, {0x00001F74, 0x00001FCA}, {0x00001F75, 0x00001FCB},
+{0x00001F76, 0x00001FDA}, {0x00001F77, 0x00001FDB}, {0x00001F78, 0x00001FF8}, {0x00001F79, 0x00001FF9},
+{0x00001F7A, 0x00001FEA}, {0x00001F7B, 0x00001FEB}, {0x00001F7C, 0x00001FFA}, {0x00001F7D, 0x00001FFB},
+{0x00001F80, 0x00001F08}, {0x00001F81, 0x00001F09}, {0x00001F82, 0x00001F0A}, {0x00001F83, 0x00001F0B},
+{0x00001F84, 0x00001F0C}, {0x00001F85, 0x00001F0D}, {0x00001F86, 0x00001F0E}, {0x00001F87, 0x00001F0F},
+{0x00001F88, 0x00001F08}, {0x00001F89, 0x00001F09}, {0x00001F8A, 0x00001F0A}, {0x00001F8B, 0x00001F0B},
+{0x00001F8C, 0x00001F0C}, {0x00001F8D, 0x00001F0D}, {0x00001F8E, 0x00001F0E}, {0x00001F8F, 0x00001F0F},
+{0x00001F90, 0x00001F28}, {0x00001F91, 0x00001F29}, {0x00001F92, 0x00001F2A}, {0x00001F93, 0x00001F2B},
+{0x00001F94, 0x00001F2C}, {0x00001F95, 0x00001F2D}, {0x00001F96, 0x00001F2E}, {0x00001F97, 0x00001F2F},
+{0x00001F98, 0x00001F28}, {0x00001F99, 0x00001F29}, {0x00001F9A, 0x00001F2A}, {0x00001F9B, 0x00001F2B},
+{0x00001F9C, 0x00001F2C}, {0x00001F9D, 0x00001F2D}, {0x00001F9E, 0x00001F2E}, {0x00001F9F, 0x00001F2F},
+{0x00001FA0, 0x00001F68}, {0x00001FA1, 0x00001F69}, {0x00001FA2, 0x00001F6A}, {0x00001FA3, 0x00001F6B},
+{0x00001FA4, 0x00001F6C}, {0x00001FA5, 0x00001F6D}, {0x00001FA6, 0x00001F6E}, {0x00001FA7, 0x00001F6F},
+{0x00001FA8, 0x00001F68}, {0x00001FA9, 0x00001F69}, {0x00001FAA, 0x00001F6A}, {0x00001FAB, 0x00001F6B},
+{0x00001FAC, 0x00001F6C}, {0x00001FAD, 0x00001F6D}, {0x00001FAE, 0x00001F6E}, {0x00001FAF, 0x00001F6F},
+{0x00001FB0, 0x00001FB8}, {0x00001FB1, 0x00001FB9}, {0x00001FB2, 0x00001FBA}, {0x00001FB3, 0x00000391},
+{0x00001FB4, 0x00000386}, {0x00001FB6, 0x00000391}, {0x00001FB7, 0x00000391}, {0x00001FBC, 0x00000391},
+{0x00001FBE, 0x00000399}, {0x00001FC2, 0x00001FCA}, {0x00001FC3, 0x00000397}, {0x00001FC4, 0x00000389},
+{0x00001FC6, 0x00000397}, {0x00001FC7, 0x00000397}, {0x00001FCC, 0x00000397}, {0x00001FD0, 0x00001FD8},
+{0x00001FD1, 0x00001FD9}, {0x00001FD2, 0x00000399}, {0x00001FD3, 0x00000399}, {0x00001FD6, 0x00000399},
+{0x00001FD7, 0x00000399}, {0x00001FE0, 0x00001FE8}, {0x00001FE1, 0x00001FE9}, {0x00001FE2, 0x000003A5},
+{0x00001FE3, 0x000003A5}, {0x00001FE4, 0x000003A1}, {0x00001FE5, 0x00001FEC}, {0x00001FE6, 0x000003A5},
+{0x00001FE7, 0x000003A5}, {0x00001FF2, 0x00001FFA}, {0x00001FF3, 0x000003A9}, {0x00001FF4, 0x0000038F},
+{0x00001FF6, 0x000003A9}, {0x00001FF7, 0x000003A9}, {0x00001FFC, 0x000003A9}, {0x0000214E, 0x00002132},
+{0x00002170, 0x00002160}, {0x00002171, 0x00002161}, {0x00002172, 0x00002162}, {0x00002173, 0x00002163},
+{0x00002174, 0x00002164}, {0x00002175, 0x00002165}, {0x00002176, 0x00002166}, {0x00002177, 0x00002167},
+{0x00002178, 0x00002168}, {0x00002179, 0x00002169}, {0x0000217A, 0x0000216A}, {0x0000217B, 0x0000216B},
+{0x0000217C, 0x0000216C}, {0x0000217D, 0x0000216D}, {0x0000217E, 0x0000216E}, {0x0000217F, 0x0000216F},
+{0x00002184, 0x00002183}, {0x000024D0, 0x000024B6}, {0x000024D1, 0x000024B7}, {0x000024D2, 0x000024B8},
+{0x000024D3, 0x000024B9}, {0x000024D4, 0x000024BA}, {0x000024D5, 0x000024BB}, {0x000024D6, 0x000024BC},
+{0x000024D7, 0x000024BD}, {0x000024D8, 0x000024BE}, {0x000024D9, 0x000024BF}, {0x000024DA, 0x000024C0},
+{0x000024DB, 0x000024C1}, {0x000024DC, 0x000024C2}, {0x000024DD, 0x000024C3}, {0x000024DE, 0x000024C4},
+{0x000024DF, 0x000024C5}, {0x000024E0, 0x000024C6}, {0x000024E1, 0x000024C7}, {0x000024E2, 0x000024C8},
+{0x000024E3, 0x000024C9}, {0x000024E4, 0x000024CA}, {0x000024E5, 0x000024CB}, {0x000024E6, 0x000024CC},
+{0x000024E7, 0x000024CD}, {0x000024E8, 0x000024CE}, {0x000024E9, 0x000024CF}, {0x00002C30, 0x00002C00},
+{0x00002C31, 0x00002C01}, {0x00002C32, 0x00002C02}, {0x00002C33, 0x00002C03}, {0x00002C34, 0x00002C04},
+{0x00002C35, 0x00002C05}, {0x00002C36, 0x00002C06}, {0x00002C37, 0x00002C07}, {0x00002C38, 0x00002C08},
+{0x00002C39, 0x00002C09}, {0x00002C3A, 0x00002C0A}, {0x00002C3B, 0x00002C0B}, {0x00002C3C, 0x00002C0C},
+{0x00002C3D, 0x00002C0D}, {0x00002C3E, 0x00002C0E}, {0x00002C3F, 0x00002C0F}, {0x00002C40, 0x00002C10},
+{0x00002C41, 0x00002C11}, {0x00002C42, 0x00002C12}, {0x00002C43, 0x00002C13}, {0x00002C44, 0x00002C14},
+{0x00002C45, 0x00002C15}, {0x00002C46, 0x00002C16}, {0x00002C47, 0x00002C17}, {0x00002C48, 0x00002C18},
+{0x00002C49, 0x00002C19}, {0x00002C4A, 0x00002C1A}, {0x00002C4B, 0x00002C1B}, {0x00002C4C, 0x00002C1C},
+{0x00002C4D, 0x00002C1D}, {0x00002C4E, 0x00002C1E}, {0x00002C4F, 0x00002C1F}, {0x00002C50, 0x00002C20},
+{0x00002C51, 0x00002C21}, {0x00002C52, 0x00002C22}, {0x00002C53, 0x00002C23}, {0x00002C54, 0x00002C24},
+{0x00002C55, 0x00002C25}, {0x00002C56, 0x00002C26}, {0x00002C57, 0x00002C27}, {0x00002C58, 0x00002C28},
+{0x00002C59, 0x00002C29}, {0x00002C5A, 0x00002C2A}, {0x00002C5B, 0x00002C2B}, {0x00002C5C, 0x00002C2C},
+{0x00002C5D, 0x00002C2D}, {0x00002C5E, 0x00002C2E}, {0x00002C61, 0x00002C60}, {0x00002C65, 0x0000023A},
+{0x00002C66, 0x0000023E}, {0x00002C68, 0x00002C67}, {0x00002C6A, 0x00002C69}, {0x00002C6C, 0x00002C6B},
+{0x00002C73, 0x00002C72}, {0x00002C76, 0x00002C75}, {0x00002C81, 0x00002C80}, {0x00002C83, 0x00002C82},
+{0x00002C85, 0x00002C84}, {0x00002C87, 0x00002C86}, {0x00002C89, 0x00002C88}, {0x00002C8B, 0x00002C8A},
+{0x00002C8D, 0x00002C8C}, {0x00002C8F, 0x00002C8E}, {0x00002C91, 0x00002C90}, {0x00002C93, 0x00002C92},
+{0x00002C95, 0x00002C94}, {0x00002C97, 0x00002C96}, {0x00002C99, 0x00002C98}, {0x00002C9B, 0x00002C9A},
+{0x00002C9D, 0x00002C9C}, {0x00002C9F, 0x00002C9E}, {0x00002CA1, 0x00002CA0}, {0x00002CA3, 0x00002CA2},
+{0x00002CA5, 0x00002CA4}, {0x00002CA7, 0x00002CA6}, {0x00002CA9, 0x00002CA8}, {0x00002CAB, 0x00002CAA},
+{0x00002CAD, 0x00002CAC}, {0x00002CAF, 0x00002CAE}, {0x00002CB1, 0x00002CB0}, {0x00002CB3, 0x00002CB2},
+{0x00002CB5, 0x00002CB4}, {0x00002CB7, 0x00002CB6}, {0x00002CB9, 0x00002CB8}, {0x00002CBB, 0x00002CBA},
+{0x00002CBD, 0x00002CBC}, {0x00002CBF, 0x00002CBE}, {0x00002CC1, 0x00002CC0}, {0x00002CC3, 0x00002CC2},
+{0x00002CC5, 0x00002CC4}, {0x00002CC7, 0x00002CC6}, {0x00002CC9, 0x00002CC8}, {0x00002CCB, 0x00002CCA},
+{0x00002CCD, 0x00002CCC}, {0x00002CCF, 0x00002CCE}, {0x00002CD1, 0x00002CD0}, {0x00002CD3, 0x00002CD2},
+{0x00002CD5, 0x00002CD4}, {0x00002CD7, 0x00002CD6}, {0x00002CD9, 0x00002CD8}, {0x00002CDB, 0x00002CDA},
+{0x00002CDD, 0x00002CDC}, {0x00002CDF, 0x00002CDE}, {0x00002CE1, 0x00002CE0}, {0x00002CE3, 0x00002CE2},
+{0x00002CEC, 0x00002CEB}, {0x00002CEE, 0x00002CED}, {0x00002CF3, 0x00002CF2}, {0x00002D00, 0x000010A0},
+{0x00002D01, 0x000010A1}, {0x00002D02, 0x000010A2}, {0x00002D03, 0x000010A3}, {0x00002D04, 0x000010A4},
+{0x00002D05, 0x000010A5}, {0x00002D06, 0x000010A6}, {0x00002D07, 0x000010A7}, {0x00002D08, 0x000010A8},
+{0x00002D09, 0x000010A9}, {0x00002D0A, 0x000010AA}, {0x00002D0B, 0x000010AB}, {0x00002D0C, 0x000010AC},
+{0x00002D0D, 0x000010AD}, {0x00002D0E, 0x000010AE}, {0x00002D0F, 0x000010AF}, {0x00002D10, 0x000010B0},
+{0x00002D11, 0x000010B1}, {0x00002D12, 0x000010B2}, {0x00002D13, 0x000010B3}, {0x00002D14, 0x000010B4},
+{0x00002D15, 0x000010B5}, {0x00002D16, 0x000010B6}, {0x00002D17, 0x000010B7}, {0x00002D18, 0x000010B8},
+{0x00002D19, 0x000010B9}, {0x00002D1A, 0x000010BA}, {0x00002D1B, 0x000010BB}, {0x00002D1C, 0x000010BC},
+{0x00002D1D, 0x000010BD}, {0x00002D1E, 0x000010BE}, {0x00002D1F, 0x000010BF}, {0x00002D20, 0x000010C0},
+{0x00002D21, 0x000010C1}, {0x00002D22, 0x000010C2}, {0x00002D23, 0x000010C3}, {0x00002D24, 0x000010C4},
+{0x00002D25, 0x000010C5}, {0x00002D27, 0x000010C7}, {0x00002D2D, 0x000010CD}, {0x0000A641, 0x0000A640},
+{0x0000A643, 0x0000A642}, {0x0000A645, 0x0000A644}, {0x0000A647, 0x0000A646}, {0x0000A649, 0x0000A648},
+{0x0000A64B, 0x0000A64A}, {0x0000A64D, 0x0000A64C}, {0x0000A64F, 0x0000A64E}, {0x0000A651, 0x0000A650},
+{0x0000A653, 0x0000A652}, {0x0000A655, 0x0000A654}, {0x0000A657, 0x0000A656}, {0x0000A659, 0x0000A658},
+{0x0000A65B, 0x0000A65A}, {0x0000A65D, 0x0000A65C}, {0x0000A65F, 0x0000A65E}, {0x0000A661, 0x0000A660},
+{0x0000A663, 0x0000A662}, {0x0000A665, 0x0000A664}, {0x0000A667, 0x0000A666}, {0x0000A669, 0x0000A668},
+{0x0000A66B, 0x0000A66A}, {0x0000A66D, 0x0000A66C}, {0x0000A681, 0x0000A680}, {0x0000A683, 0x0000A682},
+{0x0000A685, 0x0000A684}, {0x0000A687, 0x0000A686}, {0x0000A689, 0x0000A688}, {0x0000A68B, 0x0000A68A},
+{0x0000A68D, 0x0000A68C}, {0x0000A68F, 0x0000A68E}, {0x0000A691, 0x0000A690}, {0x0000A693, 0x0000A692},
+{0x0000A695, 0x0000A694}, {0x0000A697, 0x0000A696}, {0x0000A699, 0x0000A698}, {0x0000A69B, 0x0000A69A},
+{0x0000A723, 0x0000A722}, {0x0000A725, 0x0000A724}, {0x0000A727, 0x0000A726}, {0x0000A729, 0x0000A728},
+{0x0000A72B, 0x0000A72A}, {0x0000A72D, 0x0000A72C}, {0x0000A72F, 0x0000A72E}, {0x0000A733, 0x0000A732},
+{0x0000A735, 0x0000A734}, {0x0000A737, 0x0000A736}, {0x0000A739, 0x0000A738}, {0x0000A73B, 0x0000A73A},
+{0x0000A73D, 0x0000A73C}, {0x0000A73F, 0x0000A73E}, {0x0000A741, 0x0000A740}, {0x0000A743, 0x0000A742},
+{0x0000A745, 0x0000A744}, {0x0000A747, 0x0000A746}, {0x0000A749, 0x0000A748}, {0x0000A74B, 0x0000A74A},
+{0x0000A74D, 0x0000A74C}, {0x0000A74F, 0x0000A74E}, {0x0000A751, 0x0000A750}, {0x0000A753, 0x0000A752},
+{0x0000A755, 0x0000A754}, {0x0000A757, 0x0000A756}, {0x0000A759, 0x0000A758}, {0x0000A75B, 0x0000A75A},
+{0x0000A75D, 0x0000A75C}, {0x0000A75F, 0x0000A75E}, {0x0000A761, 0x0000A760}, {0x0000A763, 0x0000A762},
+{0x0000A765, 0x0000A764}, {0x0000A767, 0x0000A766}, {0x0000A769, 0x0000A768}, {0x0000A76B, 0x0000A76A},
+{0x0000A76D, 0x0000A76C}, {0x0000A76F, 0x0000A76E}, {0x0000A77A, 0x0000A779}, {0x0000A77C, 0x0000A77B},
+{0x0000A77F, 0x0000A77E}, {0x0000A781, 0x0000A780}, {0x0000A783, 0x0000A782}, {0x0000A785, 0x0000A784},
+{0x0000A787, 0x0000A786}, {0x0000A78C, 0x0000A78B}, {0x0000A791, 0x0000A790}, {0x0000A793, 0x0000A792},
+{0x0000A794, 0x0000A7C4}, {0x0000A797, 0x0000A796}, {0x0000A799, 0x0000A798}, {0x0000A79B, 0x0000A79A},
+{0x0000A79D, 0x0000A79C}, {0x0000A79F, 0x0000A79E}, {0x0000A7A1, 0x0000A7A0}, {0x0000A7A3, 0x0000A7A2},
+{0x0000A7A5, 0x0000A7A4}, {0x0000A7A7, 0x0000A7A6}, {0x0000A7A9, 0x0000A7A8}, {0x0000A7B5, 0x0000A7B4},
+{0x0000A7B7, 0x0000A7B6}, {0x0000A7B9, 0x0000A7B8}, {0x0000A7BB, 0x0000A7BA}, {0x0000A7BD, 0x0000A7BC},
+{0x0000A7BF, 0x0000A7BE}, {0x0000A7C3, 0x0000A7C2}, {0x0000A7C8, 0x0000A7C7}, {0x0000A7CA, 0x0000A7C9},
+{0x0000A7F6, 0x0000A7F5}, {0x0000AB53, 0x0000A7B3}, {0x0000AB70, 0x000013A0}, {0x0000AB71, 0x000013A1},
+{0x0000AB72, 0x000013A2}, {0x0000AB73, 0x000013A3}, {0x0000AB74, 0x000013A4}, {0x0000AB75, 0x000013A5},
+{0x0000AB76, 0x000013A6}, {0x0000AB77, 0x000013A7}, {0x0000AB78, 0x000013A8}, {0x0000AB79, 0x000013A9},
+{0x0000AB7A, 0x000013AA}, {0x0000AB7B, 0x000013AB}, {0x0000AB7C, 0x000013AC}, {0x0000AB7D, 0x000013AD},
+{0x0000AB7E, 0x000013AE}, {0x0000AB7F, 0x000013AF}, {0x0000AB80, 0x000013B0}, {0x0000AB81, 0x000013B1},
+{0x0000AB82, 0x000013B2}, {0x0000AB83, 0x000013B3}, {0x0000AB84, 0x000013B4}, {0x0000AB85, 0x000013B5},
+{0x0000AB86, 0x000013B6}, {0x0000AB87, 0x000013B7}, {0x0000AB88, 0x000013B8}, {0x0000AB89, 0x000013B9},
+{0x0000AB8A, 0x000013BA}, {0x0000AB8B, 0x000013BB}, {0x0000AB8C, 0x000013BC}, {0x0000AB8D, 0x000013BD},
+{0x0000AB8E, 0x000013BE}, {0x0000AB8F, 0x000013BF}, {0x0000AB90, 0x000013C0}, {0x0000AB91, 0x000013C1},
+{0x0000AB92, 0x000013C2}, {0x0000AB93, 0x000013C3}, {0x0000AB94, 0x000013C4}, {0x0000AB95, 0x000013C5},
+{0x0000AB96, 0x000013C6}, {0x0000AB97, 0x000013C7}, {0x0000AB98, 0x000013C8}, {0x0000AB99, 0x000013C9},
+{0x0000AB9A, 0x000013CA}, {0x0000AB9B, 0x000013CB}, {0x0000AB9C, 0x000013CC}, {0x0000AB9D, 0x000013CD},
+{0x0000AB9E, 0x000013CE}, {0x0000AB9F, 0x000013CF}, {0x0000ABA0, 0x000013D0}, {0x0000ABA1, 0x000013D1},
+{0x0000ABA2, 0x000013D2}, {0x0000ABA3, 0x000013D3}, {0x0000ABA4, 0x000013D4}, {0x0000ABA5, 0x000013D5},
+{0x0000ABA6, 0x000013D6}, {0x0000ABA7, 0x000013D7}, {0x0000ABA8, 0x000013D8}, {0x0000ABA9, 0x000013D9},
+{0x0000ABAA, 0x000013DA}, {0x0000ABAB, 0x000013DB}, {0x0000ABAC, 0x000013DC}, {0x0000ABAD, 0x000013DD},
+{0x0000ABAE, 0x000013DE}, {0x0000ABAF, 0x000013DF}, {0x0000ABB0, 0x000013E0}, {0x0000ABB1, 0x000013E1},
+{0x0000ABB2, 0x000013E2}, {0x0000ABB3, 0x000013E3}, {0x0000ABB4, 0x000013E4}, {0x0000ABB5, 0x000013E5},
+{0x0000ABB6, 0x000013E6}, {0x0000ABB7, 0x000013E7}, {0x0000ABB8, 0x000013E8}, {0x0000ABB9, 0x000013E9},
+{0x0000ABBA, 0x000013EA}, {0x0000ABBB, 0x000013EB}, {0x0000ABBC, 0x000013EC}, {0x0000ABBD, 0x000013ED},
+{0x0000ABBE, 0x000013EE}, {0x0000ABBF, 0x000013EF}, {0x0000FB00, 0x00000046}, {0x0000FB01, 0x00000046},
+{0x0000FB02, 0x00000046}, {0x0000FB03, 0x00000046}, {0x0000FB04, 0x00000046}, {0x0000FB05, 0x00000053},
+{0x0000FB06, 0x00000053}, {0x0000FB13, 0x00000544}, {0x0000FB14, 0x00000544}, {0x0000FB15, 0x00000544},
+{0x0000FB16, 0x0000054E}, {0x0000FB17, 0x00000544}, {0x0000FF41, 0x0000FF21}, {0x0000FF42, 0x0000FF22},
+{0x0000FF43, 0x0000FF23}, {0x0000FF44, 0x0000FF24}, {0x0000FF45, 0x0000FF25}, {0x0000FF46, 0x0000FF26},
+{0x0000FF47, 0x0000FF27}, {0x0000FF48, 0x0000FF28}, {0x0000FF49, 0x0000FF29}, {0x0000FF4A, 0x0000FF2A},
+{0x0000FF4B, 0x0000FF2B}, {0x0000FF4C, 0x0000FF2C}, {0x0000FF4D, 0x0000FF2D}, {0x0000FF4E, 0x0000FF2E},
+{0x0000FF4F, 0x0000FF2F}, {0x0000FF50, 0x0000FF30}, {0x0000FF51, 0x0000FF31}, {0x0000FF52, 0x0000FF32},
+{0x0000FF53, 0x0000FF33}, {0x0000FF54, 0x0000FF34}, {0x0000FF55, 0x0000FF35}, {0x0000FF56, 0x0000FF36},
+{0x0000FF57, 0x0000FF37}, {0x0000FF58, 0x0000FF38}, {0x0000FF59, 0x0000FF39}, {0x0000FF5A, 0x0000FF3A},
+{0x00010428, 0x00010400}, {0x00010429, 0x00010401}, {0x0001042A, 0x00010402}, {0x0001042B, 0x00010403},
+{0x0001042C, 0x00010404}, {0x0001042D, 0x00010405}, {0x0001042E, 0x00010406}, {0x0001042F, 0x00010407},
+{0x00010430, 0x00010408}, {0x00010431, 0x00010409}, {0x00010432, 0x0001040A}, {0x00010433, 0x0001040B},
+{0x00010434, 0x0001040C}, {0x00010435, 0x0001040D}, {0x00010436, 0x0001040E}, {0x00010437, 0x0001040F},
+{0x00010438, 0x00010410}, {0x00010439, 0x00010411}, {0x0001043A, 0x00010412}, {0x0001043B, 0x00010413},
+{0x0001043C, 0x00010414}, {0x0001043D, 0x00010415}, {0x0001043E, 0x00010416}, {0x0001043F, 0x00010417},
+{0x00010440, 0x00010418}, {0x00010441, 0x00010419}, {0x00010442, 0x0001041A}, {0x00010443, 0x0001041B},
+{0x00010444, 0x0001041C}, {0x00010445, 0x0001041D}, {0x00010446, 0x0001041E}, {0x00010447, 0x0001041F},
+{0x00010448, 0x00010420}, {0x00010449, 0x00010421}, {0x0001044A, 0x00010422}, {0x0001044B, 0x00010423},
+{0x0001044C, 0x00010424}, {0x0001044D, 0x00010425}, {0x0001044E, 0x00010426}, {0x0001044F, 0x00010427},
+{0x000104D8, 0x000104B0}, {0x000104D9, 0x000104B1}, {0x000104DA, 0x000104B2}, {0x000104DB, 0x000104B3},
+{0x000104DC, 0x000104B4}, {0x000104DD, 0x000104B5}, {0x000104DE, 0x000104B6}, {0x000104DF, 0x000104B7},
+{0x000104E0, 0x000104B8}, {0x000104E1, 0x000104B9}, {0x000104E2, 0x000104BA}, {0x000104E3, 0x000104BB},
+{0x000104E4, 0x000104BC}, {0x000104E5, 0x000104BD}, {0x000104E6, 0x000104BE}, {0x000104E7, 0x000104BF},
+{0x000104E8, 0x000104C0}, {0x000104E9, 0x000104C1}, {0x000104EA, 0x000104C2}, {0x000104EB, 0x000104C3},
+{0x000104EC, 0x000104C4}, {0x000104ED, 0x000104C5}, {0x000104EE, 0x000104C6}, {0x000104EF, 0x000104C7},
+{0x000104F0, 0x000104C8}, {0x000104F1, 0x000104C9}, {0x000104F2, 0x000104CA}, {0x000104F3, 0x000104CB},
+{0x000104F4, 0x000104CC}, {0x000104F5, 0x000104CD}, {0x000104F6, 0x000104CE}, {0x000104F7, 0x000104CF},
+{0x000104F8, 0x000104D0}, {0x000104F9, 0x000104D1}, {0x000104FA, 0x000104D2}, {0x000104FB, 0x000104D3},
+{0x00010CC0, 0x00010C80}, {0x00010CC1, 0x00010C81}, {0x00010CC2, 0x00010C82}, {0x00010CC3, 0x00010C83},
+{0x00010CC4, 0x00010C84}, {0x00010CC5, 0x00010C85}, {0x00010CC6, 0x00010C86}, {0x00010CC7, 0x00010C87},
+{0x00010CC8, 0x00010C88}, {0x00010CC9, 0x00010C89}, {0x00010CCA, 0x00010C8A}, {0x00010CCB, 0x00010C8B},
+{0x00010CCC, 0x00010C8C}, {0x00010CCD, 0x00010C8D}, {0x00010CCE, 0x00010C8E}, {0x00010CCF, 0x00010C8F},
+{0x00010CD0, 0x00010C90}, {0x00010CD1, 0x00010C91}, {0x00010CD2, 0x00010C92}, {0x00010CD3, 0x00010C93},
+{0x00010CD4, 0x00010C94}, {0x00010CD5, 0x00010C95}, {0x00010CD6, 0x00010C96}, {0x00010CD7, 0x00010C97},
+{0x00010CD8, 0x00010C98}, {0x00010CD9, 0x00010C99}, {0x00010CDA, 0x00010C9A}, {0x00010CDB, 0x00010C9B},
+{0x00010CDC, 0x00010C9C}, {0x00010CDD, 0x00010C9D}, {0x00010CDE, 0x00010C9E}, {0x00010CDF, 0x00010C9F},
+{0x00010CE0, 0x00010CA0}, {0x00010CE1, 0x00010CA1}, {0x00010CE2, 0x00010CA2}, {0x00010CE3, 0x00010CA3},
+{0x00010CE4, 0x00010CA4}, {0x00010CE5, 0x00010CA5}, {0x00010CE6, 0x00010CA6}, {0x00010CE7, 0x00010CA7},
+{0x00010CE8, 0x00010CA8}, {0x00010CE9, 0x00010CA9}, {0x00010CEA, 0x00010CAA}, {0x00010CEB, 0x00010CAB},
+{0x00010CEC, 0x00010CAC}, {0x00010CED, 0x00010CAD}, {0x00010CEE, 0x00010CAE}, {0x00010CEF, 0x00010CAF},
+{0x00010CF0, 0x00010CB0}, {0x00010CF1, 0x00010CB1}, {0x00010CF2, 0x00010CB2}, {0x000118C0, 0x000118A0},
+{0x000118C1, 0x000118A1}, {0x000118C2, 0x000118A2}, {0x000118C3, 0x000118A3}, {0x000118C4, 0x000118A4},
+{0x000118C5, 0x000118A5}, {0x000118C6, 0x000118A6}, {0x000118C7, 0x000118A7}, {0x000118C8, 0x000118A8},
+{0x000118C9, 0x000118A9}, {0x000118CA, 0x000118AA}, {0x000118CB, 0x000118AB}, {0x000118CC, 0x000118AC},
+{0x000118CD, 0x000118AD}, {0x000118CE, 0x000118AE}, {0x000118CF, 0x000118AF}, {0x000118D0, 0x000118B0},
+{0x000118D1, 0x000118B1}, {0x000118D2, 0x000118B2}, {0x000118D3, 0x000118B3}, {0x000118D4, 0x000118B4},
+{0x000118D5, 0x000118B5}, {0x000118D6, 0x000118B6}, {0x000118D7, 0x000118B7}, {0x000118D8, 0x000118B8},
+{0x000118D9, 0x000118B9}, {0x000118DA, 0x000118BA}, {0x000118DB, 0x000118BB}, {0x000118DC, 0x000118BC},
+{0x000118DD, 0x000118BD}, {0x000118DE, 0x000118BE}, {0x000118DF, 0x000118BF}, {0x00016E60, 0x00016E40},
+{0x00016E61, 0x00016E41}, {0x00016E62, 0x00016E42}, {0x00016E63, 0x00016E43}, {0x00016E64, 0x00016E44},
+{0x00016E65, 0x00016E45}, {0x00016E66, 0x00016E46}, {0x00016E67, 0x00016E47}, {0x00016E68, 0x00016E48},
+{0x00016E69, 0x00016E49}, {0x00016E6A, 0x00016E4A}, {0x00016E6B, 0x00016E4B}, {0x00016E6C, 0x00016E4C},
+{0x00016E6D, 0x00016E4D}, {0x00016E6E, 0x00016E4E}, {0x00016E6F, 0x00016E4F}, {0x00016E70, 0x00016E50},
+{0x00016E71, 0x00016E51}, {0x00016E72, 0x00016E52}, {0x00016E73, 0x00016E53}, {0x00016E74, 0x00016E54},
+{0x00016E75, 0x00016E55}, {0x00016E76, 0x00016E56}, {0x00016E77, 0x00016E57}, {0x00016E78, 0x00016E58},
+{0x00016E79, 0x00016E59}, {0x00016E7A, 0x00016E5A}, {0x00016E7B, 0x00016E5B}, {0x00016E7C, 0x00016E5C},
+{0x00016E7D, 0x00016E5D}, {0x00016E7E, 0x00016E5E}, {0x00016E7F, 0x00016E5F}, {0x0001E922, 0x0001E900},
+{0x0001E923, 0x0001E901}, {0x0001E924, 0x0001E902}, {0x0001E925, 0x0001E903}, {0x0001E926, 0x0001E904},
+{0x0001E927, 0x0001E905}, {0x0001E928, 0x0001E906}, {0x0001E929, 0x0001E907}, {0x0001E92A, 0x0001E908},
+{0x0001E92B, 0x0001E909}, {0x0001E92C, 0x0001E90A}, {0x0001E92D, 0x0001E90B}, {0x0001E92E, 0x0001E90C},
+{0x0001E92F, 0x0001E90D}, {0x0001E930, 0x0001E90E}, {0x0001E931, 0x0001E90F}, {0x0001E932, 0x0001E910},
+{0x0001E933, 0x0001E911}, {0x0001E934, 0x0001E912}, {0x0001E935, 0x0001E913}, {0x0001E936, 0x0001E914},
+{0x0001E937, 0x0001E915}, {0x0001E938, 0x0001E916}, {0x0001E939, 0x0001E917}, {0x0001E93A, 0x0001E918},
+{0x0001E93B, 0x0001E919}, {0x0001E93C, 0x0001E91A}, {0x0001E93D, 0x0001E91B}, {0x0001E93E, 0x0001E91C},
+{0x0001E93F, 0x0001E91D}, {0x0001E940, 0x0001E91E}, {0x0001E941, 0x0001E91F}, {0x0001E942, 0x0001E920},
+{0x0001E943, 0x0001E921},
};
const std::multimap<uint32_t, uint32_t> unicode_map_nfd = {
@@ -1407,245 +2181,3 @@ const std::multimap<uint32_t, uint32_t> unicode_map_nfd = {
{0x0002FA16, 0x00004D56}, {0x0002FA17, 0x00009EF9}, {0x0002FA18, 0x00009EFE}, {0x0002FA19, 0x00009F05},
{0x0002FA1A, 0x00009F0F}, {0x0002FA1B, 0x00009F16}, {0x0002FA1D, 0x0002A600},
};
-
-const std::map<char32_t, char32_t> unicode_map_lowercase = {
-{0x00041, 0x00061}, {0x00042, 0x00062}, {0x00043, 0x00063}, {0x00044, 0x00064}, {0x00045, 0x00065}, {0x00046, 0x00066},
-{0x00047, 0x00067}, {0x00048, 0x00068}, {0x00049, 0x00069}, {0x0004A, 0x0006A}, {0x0004B, 0x0006B}, {0x0004C, 0x0006C},
-{0x0004D, 0x0006D}, {0x0004E, 0x0006E}, {0x0004F, 0x0006F}, {0x00050, 0x00070}, {0x00051, 0x00071}, {0x00052, 0x00072},
-{0x00053, 0x00073}, {0x00054, 0x00074}, {0x00055, 0x00075}, {0x00056, 0x00076}, {0x00057, 0x00077}, {0x00058, 0x00078},
-{0x00059, 0x00079}, {0x0005A, 0x0007A}, {0x000C0, 0x000E0}, {0x000C1, 0x000E1}, {0x000C2, 0x000E2}, {0x000C3, 0x000E3},
-{0x000C4, 0x000E4}, {0x000C5, 0x000E5}, {0x000C6, 0x000E6}, {0x000C7, 0x000E7}, {0x000C8, 0x000E8}, {0x000C9, 0x000E9},
-{0x000CA, 0x000EA}, {0x000CB, 0x000EB}, {0x000CC, 0x000EC}, {0x000CD, 0x000ED}, {0x000CE, 0x000EE}, {0x000CF, 0x000EF},
-{0x000D0, 0x000F0}, {0x000D1, 0x000F1}, {0x000D2, 0x000F2}, {0x000D3, 0x000F3}, {0x000D4, 0x000F4}, {0x000D5, 0x000F5},
-{0x000D6, 0x000F6}, {0x000D8, 0x000F8}, {0x000D9, 0x000F9}, {0x000DA, 0x000FA}, {0x000DB, 0x000FB}, {0x000DC, 0x000FC},
-{0x000DD, 0x000FD}, {0x000DE, 0x000FE}, {0x00100, 0x00101}, {0x00102, 0x00103}, {0x00104, 0x00105}, {0x00106, 0x00107},
-{0x00108, 0x00109}, {0x0010A, 0x0010B}, {0x0010C, 0x0010D}, {0x0010E, 0x0010F}, {0x00110, 0x00111}, {0x00112, 0x00113},
-{0x00114, 0x00115}, {0x00116, 0x00117}, {0x00118, 0x00119}, {0x0011A, 0x0011B}, {0x0011C, 0x0011D}, {0x0011E, 0x0011F},
-{0x00120, 0x00121}, {0x00122, 0x00123}, {0x00124, 0x00125}, {0x00126, 0x00127}, {0x00128, 0x00129}, {0x0012A, 0x0012B},
-{0x0012C, 0x0012D}, {0x0012E, 0x0012F}, {0x00130, 0x00069}, {0x00132, 0x00133}, {0x00134, 0x00135}, {0x00136, 0x00137},
-{0x00139, 0x0013A}, {0x0013B, 0x0013C}, {0x0013D, 0x0013E}, {0x0013F, 0x00140}, {0x00141, 0x00142}, {0x00143, 0x00144},
-{0x00145, 0x00146}, {0x00147, 0x00148}, {0x0014A, 0x0014B}, {0x0014C, 0x0014D}, {0x0014E, 0x0014F}, {0x00150, 0x00151},
-{0x00152, 0x00153}, {0x00154, 0x00155}, {0x00156, 0x00157}, {0x00158, 0x00159}, {0x0015A, 0x0015B}, {0x0015C, 0x0015D},
-{0x0015E, 0x0015F}, {0x00160, 0x00161}, {0x00162, 0x00163}, {0x00164, 0x00165}, {0x00166, 0x00167}, {0x00168, 0x00169},
-{0x0016A, 0x0016B}, {0x0016C, 0x0016D}, {0x0016E, 0x0016F}, {0x00170, 0x00171}, {0x00172, 0x00173}, {0x00174, 0x00175},
-{0x00176, 0x00177}, {0x00178, 0x000FF}, {0x00179, 0x0017A}, {0x0017B, 0x0017C}, {0x0017D, 0x0017E}, {0x00181, 0x00253},
-{0x00182, 0x00183}, {0x00184, 0x00185}, {0x00186, 0x00254}, {0x00187, 0x00188}, {0x00189, 0x00256}, {0x0018A, 0x00257},
-{0x0018B, 0x0018C}, {0x0018E, 0x001DD}, {0x0018F, 0x00259}, {0x00190, 0x0025B}, {0x00191, 0x00192}, {0x00193, 0x00260},
-{0x00194, 0x00263}, {0x00196, 0x00269}, {0x00197, 0x00268}, {0x00198, 0x00199}, {0x0019C, 0x0026F}, {0x0019D, 0x00272},
-{0x0019F, 0x00275}, {0x001A0, 0x001A1}, {0x001A2, 0x001A3}, {0x001A4, 0x001A5}, {0x001A6, 0x00280}, {0x001A7, 0x001A8},
-{0x001A9, 0x00283}, {0x001AC, 0x001AD}, {0x001AE, 0x00288}, {0x001AF, 0x001B0}, {0x001B1, 0x0028A}, {0x001B2, 0x0028B},
-{0x001B3, 0x001B4}, {0x001B5, 0x001B6}, {0x001B7, 0x00292}, {0x001B8, 0x001B9}, {0x001BC, 0x001BD}, {0x001C4, 0x001C6},
-{0x001C5, 0x001C6}, {0x001C7, 0x001C9}, {0x001C8, 0x001C9}, {0x001CA, 0x001CC}, {0x001CB, 0x001CC}, {0x001CD, 0x001CE},
-{0x001CF, 0x001D0}, {0x001D1, 0x001D2}, {0x001D3, 0x001D4}, {0x001D5, 0x001D6}, {0x001D7, 0x001D8}, {0x001D9, 0x001DA},
-{0x001DB, 0x001DC}, {0x001DE, 0x001DF}, {0x001E0, 0x001E1}, {0x001E2, 0x001E3}, {0x001E4, 0x001E5}, {0x001E6, 0x001E7},
-{0x001E8, 0x001E9}, {0x001EA, 0x001EB}, {0x001EC, 0x001ED}, {0x001EE, 0x001EF}, {0x001F1, 0x001F3}, {0x001F2, 0x001F3},
-{0x001F4, 0x001F5}, {0x001F6, 0x00195}, {0x001F7, 0x001BF}, {0x001F8, 0x001F9}, {0x001FA, 0x001FB}, {0x001FC, 0x001FD},
-{0x001FE, 0x001FF}, {0x00200, 0x00201}, {0x00202, 0x00203}, {0x00204, 0x00205}, {0x00206, 0x00207}, {0x00208, 0x00209},
-{0x0020A, 0x0020B}, {0x0020C, 0x0020D}, {0x0020E, 0x0020F}, {0x00210, 0x00211}, {0x00212, 0x00213}, {0x00214, 0x00215},
-{0x00216, 0x00217}, {0x00218, 0x00219}, {0x0021A, 0x0021B}, {0x0021C, 0x0021D}, {0x0021E, 0x0021F}, {0x00220, 0x0019E},
-{0x00222, 0x00223}, {0x00224, 0x00225}, {0x00226, 0x00227}, {0x00228, 0x00229}, {0x0022A, 0x0022B}, {0x0022C, 0x0022D},
-{0x0022E, 0x0022F}, {0x00230, 0x00231}, {0x00232, 0x00233}, {0x0023A, 0x02C65}, {0x0023B, 0x0023C}, {0x0023D, 0x0019A},
-{0x0023E, 0x02C66}, {0x00241, 0x00242}, {0x00243, 0x00180}, {0x00244, 0x00289}, {0x00245, 0x0028C}, {0x00246, 0x00247},
-{0x00248, 0x00249}, {0x0024A, 0x0024B}, {0x0024C, 0x0024D}, {0x0024E, 0x0024F}, {0x00370, 0x00371}, {0x00372, 0x00373},
-{0x00376, 0x00377}, {0x0037F, 0x003F3}, {0x00386, 0x003AC}, {0x00388, 0x003AD}, {0x00389, 0x003AE}, {0x0038A, 0x003AF},
-{0x0038C, 0x003CC}, {0x0038E, 0x003CD}, {0x0038F, 0x003CE}, {0x00391, 0x003B1}, {0x00392, 0x003B2}, {0x00393, 0x003B3},
-{0x00394, 0x003B4}, {0x00395, 0x003B5}, {0x00396, 0x003B6}, {0x00397, 0x003B7}, {0x00398, 0x003B8}, {0x00399, 0x003B9},
-{0x0039A, 0x003BA}, {0x0039B, 0x003BB}, {0x0039C, 0x003BC}, {0x0039D, 0x003BD}, {0x0039E, 0x003BE}, {0x0039F, 0x003BF},
-{0x003A0, 0x003C0}, {0x003A1, 0x003C1}, {0x003A3, 0x003C3}, {0x003A4, 0x003C4}, {0x003A5, 0x003C5}, {0x003A6, 0x003C6},
-{0x003A7, 0x003C7}, {0x003A8, 0x003C8}, {0x003A9, 0x003C9}, {0x003AA, 0x003CA}, {0x003AB, 0x003CB}, {0x003CF, 0x003D7},
-{0x003D8, 0x003D9}, {0x003DA, 0x003DB}, {0x003DC, 0x003DD}, {0x003DE, 0x003DF}, {0x003E0, 0x003E1}, {0x003E2, 0x003E3},
-{0x003E4, 0x003E5}, {0x003E6, 0x003E7}, {0x003E8, 0x003E9}, {0x003EA, 0x003EB}, {0x003EC, 0x003ED}, {0x003EE, 0x003EF},
-{0x003F4, 0x003B8}, {0x003F7, 0x003F8}, {0x003F9, 0x003F2}, {0x003FA, 0x003FB}, {0x003FD, 0x0037B}, {0x003FE, 0x0037C},
-{0x003FF, 0x0037D}, {0x00400, 0x00450}, {0x00401, 0x00451}, {0x00402, 0x00452}, {0x00403, 0x00453}, {0x00404, 0x00454},
-{0x00405, 0x00455}, {0x00406, 0x00456}, {0x00407, 0x00457}, {0x00408, 0x00458}, {0x00409, 0x00459}, {0x0040A, 0x0045A},
-{0x0040B, 0x0045B}, {0x0040C, 0x0045C}, {0x0040D, 0x0045D}, {0x0040E, 0x0045E}, {0x0040F, 0x0045F}, {0x00410, 0x00430},
-{0x00411, 0x00431}, {0x00412, 0x00432}, {0x00413, 0x00433}, {0x00414, 0x00434}, {0x00415, 0x00435}, {0x00416, 0x00436},
-{0x00417, 0x00437}, {0x00418, 0x00438}, {0x00419, 0x00439}, {0x0041A, 0x0043A}, {0x0041B, 0x0043B}, {0x0041C, 0x0043C},
-{0x0041D, 0x0043D}, {0x0041E, 0x0043E}, {0x0041F, 0x0043F}, {0x00420, 0x00440}, {0x00421, 0x00441}, {0x00422, 0x00442},
-{0x00423, 0x00443}, {0x00424, 0x00444}, {0x00425, 0x00445}, {0x00426, 0x00446}, {0x00427, 0x00447}, {0x00428, 0x00448},
-{0x00429, 0x00449}, {0x0042A, 0x0044A}, {0x0042B, 0x0044B}, {0x0042C, 0x0044C}, {0x0042D, 0x0044D}, {0x0042E, 0x0044E},
-{0x0042F, 0x0044F}, {0x00460, 0x00461}, {0x00462, 0x00463}, {0x00464, 0x00465}, {0x00466, 0x00467}, {0x00468, 0x00469},
-{0x0046A, 0x0046B}, {0x0046C, 0x0046D}, {0x0046E, 0x0046F}, {0x00470, 0x00471}, {0x00472, 0x00473}, {0x00474, 0x00475},
-{0x00476, 0x00477}, {0x00478, 0x00479}, {0x0047A, 0x0047B}, {0x0047C, 0x0047D}, {0x0047E, 0x0047F}, {0x00480, 0x00481},
-{0x0048A, 0x0048B}, {0x0048C, 0x0048D}, {0x0048E, 0x0048F}, {0x00490, 0x00491}, {0x00492, 0x00493}, {0x00494, 0x00495},
-{0x00496, 0x00497}, {0x00498, 0x00499}, {0x0049A, 0x0049B}, {0x0049C, 0x0049D}, {0x0049E, 0x0049F}, {0x004A0, 0x004A1},
-{0x004A2, 0x004A3}, {0x004A4, 0x004A5}, {0x004A6, 0x004A7}, {0x004A8, 0x004A9}, {0x004AA, 0x004AB}, {0x004AC, 0x004AD},
-{0x004AE, 0x004AF}, {0x004B0, 0x004B1}, {0x004B2, 0x004B3}, {0x004B4, 0x004B5}, {0x004B6, 0x004B7}, {0x004B8, 0x004B9},
-{0x004BA, 0x004BB}, {0x004BC, 0x004BD}, {0x004BE, 0x004BF}, {0x004C0, 0x004CF}, {0x004C1, 0x004C2}, {0x004C3, 0x004C4},
-{0x004C5, 0x004C6}, {0x004C7, 0x004C8}, {0x004C9, 0x004CA}, {0x004CB, 0x004CC}, {0x004CD, 0x004CE}, {0x004D0, 0x004D1},
-{0x004D2, 0x004D3}, {0x004D4, 0x004D5}, {0x004D6, 0x004D7}, {0x004D8, 0x004D9}, {0x004DA, 0x004DB}, {0x004DC, 0x004DD},
-{0x004DE, 0x004DF}, {0x004E0, 0x004E1}, {0x004E2, 0x004E3}, {0x004E4, 0x004E5}, {0x004E6, 0x004E7}, {0x004E8, 0x004E9},
-{0x004EA, 0x004EB}, {0x004EC, 0x004ED}, {0x004EE, 0x004EF}, {0x004F0, 0x004F1}, {0x004F2, 0x004F3}, {0x004F4, 0x004F5},
-{0x004F6, 0x004F7}, {0x004F8, 0x004F9}, {0x004FA, 0x004FB}, {0x004FC, 0x004FD}, {0x004FE, 0x004FF}, {0x00500, 0x00501},
-{0x00502, 0x00503}, {0x00504, 0x00505}, {0x00506, 0x00507}, {0x00508, 0x00509}, {0x0050A, 0x0050B}, {0x0050C, 0x0050D},
-{0x0050E, 0x0050F}, {0x00510, 0x00511}, {0x00512, 0x00513}, {0x00514, 0x00515}, {0x00516, 0x00517}, {0x00518, 0x00519},
-{0x0051A, 0x0051B}, {0x0051C, 0x0051D}, {0x0051E, 0x0051F}, {0x00520, 0x00521}, {0x00522, 0x00523}, {0x00524, 0x00525},
-{0x00526, 0x00527}, {0x00528, 0x00529}, {0x0052A, 0x0052B}, {0x0052C, 0x0052D}, {0x0052E, 0x0052F}, {0x00531, 0x00561},
-{0x00532, 0x00562}, {0x00533, 0x00563}, {0x00534, 0x00564}, {0x00535, 0x00565}, {0x00536, 0x00566}, {0x00537, 0x00567},
-{0x00538, 0x00568}, {0x00539, 0x00569}, {0x0053A, 0x0056A}, {0x0053B, 0x0056B}, {0x0053C, 0x0056C}, {0x0053D, 0x0056D},
-{0x0053E, 0x0056E}, {0x0053F, 0x0056F}, {0x00540, 0x00570}, {0x00541, 0x00571}, {0x00542, 0x00572}, {0x00543, 0x00573},
-{0x00544, 0x00574}, {0x00545, 0x00575}, {0x00546, 0x00576}, {0x00547, 0x00577}, {0x00548, 0x00578}, {0x00549, 0x00579},
-{0x0054A, 0x0057A}, {0x0054B, 0x0057B}, {0x0054C, 0x0057C}, {0x0054D, 0x0057D}, {0x0054E, 0x0057E}, {0x0054F, 0x0057F},
-{0x00550, 0x00580}, {0x00551, 0x00581}, {0x00552, 0x00582}, {0x00553, 0x00583}, {0x00554, 0x00584}, {0x00555, 0x00585},
-{0x00556, 0x00586}, {0x010A0, 0x02D00}, {0x010A1, 0x02D01}, {0x010A2, 0x02D02}, {0x010A3, 0x02D03}, {0x010A4, 0x02D04},
-{0x010A5, 0x02D05}, {0x010A6, 0x02D06}, {0x010A7, 0x02D07}, {0x010A8, 0x02D08}, {0x010A9, 0x02D09}, {0x010AA, 0x02D0A},
-{0x010AB, 0x02D0B}, {0x010AC, 0x02D0C}, {0x010AD, 0x02D0D}, {0x010AE, 0x02D0E}, {0x010AF, 0x02D0F}, {0x010B0, 0x02D10},
-{0x010B1, 0x02D11}, {0x010B2, 0x02D12}, {0x010B3, 0x02D13}, {0x010B4, 0x02D14}, {0x010B5, 0x02D15}, {0x010B6, 0x02D16},
-{0x010B7, 0x02D17}, {0x010B8, 0x02D18}, {0x010B9, 0x02D19}, {0x010BA, 0x02D1A}, {0x010BB, 0x02D1B}, {0x010BC, 0x02D1C},
-{0x010BD, 0x02D1D}, {0x010BE, 0x02D1E}, {0x010BF, 0x02D1F}, {0x010C0, 0x02D20}, {0x010C1, 0x02D21}, {0x010C2, 0x02D22},
-{0x010C3, 0x02D23}, {0x010C4, 0x02D24}, {0x010C5, 0x02D25}, {0x010C7, 0x02D27}, {0x010CD, 0x02D2D}, {0x013A0, 0x0AB70},
-{0x013A1, 0x0AB71}, {0x013A2, 0x0AB72}, {0x013A3, 0x0AB73}, {0x013A4, 0x0AB74}, {0x013A5, 0x0AB75}, {0x013A6, 0x0AB76},
-{0x013A7, 0x0AB77}, {0x013A8, 0x0AB78}, {0x013A9, 0x0AB79}, {0x013AA, 0x0AB7A}, {0x013AB, 0x0AB7B}, {0x013AC, 0x0AB7C},
-{0x013AD, 0x0AB7D}, {0x013AE, 0x0AB7E}, {0x013AF, 0x0AB7F}, {0x013B0, 0x0AB80}, {0x013B1, 0x0AB81}, {0x013B2, 0x0AB82},
-{0x013B3, 0x0AB83}, {0x013B4, 0x0AB84}, {0x013B5, 0x0AB85}, {0x013B6, 0x0AB86}, {0x013B7, 0x0AB87}, {0x013B8, 0x0AB88},
-{0x013B9, 0x0AB89}, {0x013BA, 0x0AB8A}, {0x013BB, 0x0AB8B}, {0x013BC, 0x0AB8C}, {0x013BD, 0x0AB8D}, {0x013BE, 0x0AB8E},
-{0x013BF, 0x0AB8F}, {0x013C0, 0x0AB90}, {0x013C1, 0x0AB91}, {0x013C2, 0x0AB92}, {0x013C3, 0x0AB93}, {0x013C4, 0x0AB94},
-{0x013C5, 0x0AB95}, {0x013C6, 0x0AB96}, {0x013C7, 0x0AB97}, {0x013C8, 0x0AB98}, {0x013C9, 0x0AB99}, {0x013CA, 0x0AB9A},
-{0x013CB, 0x0AB9B}, {0x013CC, 0x0AB9C}, {0x013CD, 0x0AB9D}, {0x013CE, 0x0AB9E}, {0x013CF, 0x0AB9F}, {0x013D0, 0x0ABA0},
-{0x013D1, 0x0ABA1}, {0x013D2, 0x0ABA2}, {0x013D3, 0x0ABA3}, {0x013D4, 0x0ABA4}, {0x013D5, 0x0ABA5}, {0x013D6, 0x0ABA6},
-{0x013D7, 0x0ABA7}, {0x013D8, 0x0ABA8}, {0x013D9, 0x0ABA9}, {0x013DA, 0x0ABAA}, {0x013DB, 0x0ABAB}, {0x013DC, 0x0ABAC},
-{0x013DD, 0x0ABAD}, {0x013DE, 0x0ABAE}, {0x013DF, 0x0ABAF}, {0x013E0, 0x0ABB0}, {0x013E1, 0x0ABB1}, {0x013E2, 0x0ABB2},
-{0x013E3, 0x0ABB3}, {0x013E4, 0x0ABB4}, {0x013E5, 0x0ABB5}, {0x013E6, 0x0ABB6}, {0x013E7, 0x0ABB7}, {0x013E8, 0x0ABB8},
-{0x013E9, 0x0ABB9}, {0x013EA, 0x0ABBA}, {0x013EB, 0x0ABBB}, {0x013EC, 0x0ABBC}, {0x013ED, 0x0ABBD}, {0x013EE, 0x0ABBE},
-{0x013EF, 0x0ABBF}, {0x013F0, 0x013F8}, {0x013F1, 0x013F9}, {0x013F2, 0x013FA}, {0x013F3, 0x013FB}, {0x013F4, 0x013FC},
-{0x013F5, 0x013FD}, {0x01C90, 0x010D0}, {0x01C91, 0x010D1}, {0x01C92, 0x010D2}, {0x01C93, 0x010D3}, {0x01C94, 0x010D4},
-{0x01C95, 0x010D5}, {0x01C96, 0x010D6}, {0x01C97, 0x010D7}, {0x01C98, 0x010D8}, {0x01C99, 0x010D9}, {0x01C9A, 0x010DA},
-{0x01C9B, 0x010DB}, {0x01C9C, 0x010DC}, {0x01C9D, 0x010DD}, {0x01C9E, 0x010DE}, {0x01C9F, 0x010DF}, {0x01CA0, 0x010E0},
-{0x01CA1, 0x010E1}, {0x01CA2, 0x010E2}, {0x01CA3, 0x010E3}, {0x01CA4, 0x010E4}, {0x01CA5, 0x010E5}, {0x01CA6, 0x010E6},
-{0x01CA7, 0x010E7}, {0x01CA8, 0x010E8}, {0x01CA9, 0x010E9}, {0x01CAA, 0x010EA}, {0x01CAB, 0x010EB}, {0x01CAC, 0x010EC},
-{0x01CAD, 0x010ED}, {0x01CAE, 0x010EE}, {0x01CAF, 0x010EF}, {0x01CB0, 0x010F0}, {0x01CB1, 0x010F1}, {0x01CB2, 0x010F2},
-{0x01CB3, 0x010F3}, {0x01CB4, 0x010F4}, {0x01CB5, 0x010F5}, {0x01CB6, 0x010F6}, {0x01CB7, 0x010F7}, {0x01CB8, 0x010F8},
-{0x01CB9, 0x010F9}, {0x01CBA, 0x010FA}, {0x01CBD, 0x010FD}, {0x01CBE, 0x010FE}, {0x01CBF, 0x010FF}, {0x01E00, 0x01E01},
-{0x01E02, 0x01E03}, {0x01E04, 0x01E05}, {0x01E06, 0x01E07}, {0x01E08, 0x01E09}, {0x01E0A, 0x01E0B}, {0x01E0C, 0x01E0D},
-{0x01E0E, 0x01E0F}, {0x01E10, 0x01E11}, {0x01E12, 0x01E13}, {0x01E14, 0x01E15}, {0x01E16, 0x01E17}, {0x01E18, 0x01E19},
-{0x01E1A, 0x01E1B}, {0x01E1C, 0x01E1D}, {0x01E1E, 0x01E1F}, {0x01E20, 0x01E21}, {0x01E22, 0x01E23}, {0x01E24, 0x01E25},
-{0x01E26, 0x01E27}, {0x01E28, 0x01E29}, {0x01E2A, 0x01E2B}, {0x01E2C, 0x01E2D}, {0x01E2E, 0x01E2F}, {0x01E30, 0x01E31},
-{0x01E32, 0x01E33}, {0x01E34, 0x01E35}, {0x01E36, 0x01E37}, {0x01E38, 0x01E39}, {0x01E3A, 0x01E3B}, {0x01E3C, 0x01E3D},
-{0x01E3E, 0x01E3F}, {0x01E40, 0x01E41}, {0x01E42, 0x01E43}, {0x01E44, 0x01E45}, {0x01E46, 0x01E47}, {0x01E48, 0x01E49},
-{0x01E4A, 0x01E4B}, {0x01E4C, 0x01E4D}, {0x01E4E, 0x01E4F}, {0x01E50, 0x01E51}, {0x01E52, 0x01E53}, {0x01E54, 0x01E55},
-{0x01E56, 0x01E57}, {0x01E58, 0x01E59}, {0x01E5A, 0x01E5B}, {0x01E5C, 0x01E5D}, {0x01E5E, 0x01E5F}, {0x01E60, 0x01E61},
-{0x01E62, 0x01E63}, {0x01E64, 0x01E65}, {0x01E66, 0x01E67}, {0x01E68, 0x01E69}, {0x01E6A, 0x01E6B}, {0x01E6C, 0x01E6D},
-{0x01E6E, 0x01E6F}, {0x01E70, 0x01E71}, {0x01E72, 0x01E73}, {0x01E74, 0x01E75}, {0x01E76, 0x01E77}, {0x01E78, 0x01E79},
-{0x01E7A, 0x01E7B}, {0x01E7C, 0x01E7D}, {0x01E7E, 0x01E7F}, {0x01E80, 0x01E81}, {0x01E82, 0x01E83}, {0x01E84, 0x01E85},
-{0x01E86, 0x01E87}, {0x01E88, 0x01E89}, {0x01E8A, 0x01E8B}, {0x01E8C, 0x01E8D}, {0x01E8E, 0x01E8F}, {0x01E90, 0x01E91},
-{0x01E92, 0x01E93}, {0x01E94, 0x01E95}, {0x01E9E, 0x000DF}, {0x01EA0, 0x01EA1}, {0x01EA2, 0x01EA3}, {0x01EA4, 0x01EA5},
-{0x01EA6, 0x01EA7}, {0x01EA8, 0x01EA9}, {0x01EAA, 0x01EAB}, {0x01EAC, 0x01EAD}, {0x01EAE, 0x01EAF}, {0x01EB0, 0x01EB1},
-{0x01EB2, 0x01EB3}, {0x01EB4, 0x01EB5}, {0x01EB6, 0x01EB7}, {0x01EB8, 0x01EB9}, {0x01EBA, 0x01EBB}, {0x01EBC, 0x01EBD},
-{0x01EBE, 0x01EBF}, {0x01EC0, 0x01EC1}, {0x01EC2, 0x01EC3}, {0x01EC4, 0x01EC5}, {0x01EC6, 0x01EC7}, {0x01EC8, 0x01EC9},
-{0x01ECA, 0x01ECB}, {0x01ECC, 0x01ECD}, {0x01ECE, 0x01ECF}, {0x01ED0, 0x01ED1}, {0x01ED2, 0x01ED3}, {0x01ED4, 0x01ED5},
-{0x01ED6, 0x01ED7}, {0x01ED8, 0x01ED9}, {0x01EDA, 0x01EDB}, {0x01EDC, 0x01EDD}, {0x01EDE, 0x01EDF}, {0x01EE0, 0x01EE1},
-{0x01EE2, 0x01EE3}, {0x01EE4, 0x01EE5}, {0x01EE6, 0x01EE7}, {0x01EE8, 0x01EE9}, {0x01EEA, 0x01EEB}, {0x01EEC, 0x01EED},
-{0x01EEE, 0x01EEF}, {0x01EF0, 0x01EF1}, {0x01EF2, 0x01EF3}, {0x01EF4, 0x01EF5}, {0x01EF6, 0x01EF7}, {0x01EF8, 0x01EF9},
-{0x01EFA, 0x01EFB}, {0x01EFC, 0x01EFD}, {0x01EFE, 0x01EFF}, {0x01F08, 0x01F00}, {0x01F09, 0x01F01}, {0x01F0A, 0x01F02},
-{0x01F0B, 0x01F03}, {0x01F0C, 0x01F04}, {0x01F0D, 0x01F05}, {0x01F0E, 0x01F06}, {0x01F0F, 0x01F07}, {0x01F18, 0x01F10},
-{0x01F19, 0x01F11}, {0x01F1A, 0x01F12}, {0x01F1B, 0x01F13}, {0x01F1C, 0x01F14}, {0x01F1D, 0x01F15}, {0x01F28, 0x01F20},
-{0x01F29, 0x01F21}, {0x01F2A, 0x01F22}, {0x01F2B, 0x01F23}, {0x01F2C, 0x01F24}, {0x01F2D, 0x01F25}, {0x01F2E, 0x01F26},
-{0x01F2F, 0x01F27}, {0x01F38, 0x01F30}, {0x01F39, 0x01F31}, {0x01F3A, 0x01F32}, {0x01F3B, 0x01F33}, {0x01F3C, 0x01F34},
-{0x01F3D, 0x01F35}, {0x01F3E, 0x01F36}, {0x01F3F, 0x01F37}, {0x01F48, 0x01F40}, {0x01F49, 0x01F41}, {0x01F4A, 0x01F42},
-{0x01F4B, 0x01F43}, {0x01F4C, 0x01F44}, {0x01F4D, 0x01F45}, {0x01F59, 0x01F51}, {0x01F5B, 0x01F53}, {0x01F5D, 0x01F55},
-{0x01F5F, 0x01F57}, {0x01F68, 0x01F60}, {0x01F69, 0x01F61}, {0x01F6A, 0x01F62}, {0x01F6B, 0x01F63}, {0x01F6C, 0x01F64},
-{0x01F6D, 0x01F65}, {0x01F6E, 0x01F66}, {0x01F6F, 0x01F67}, {0x01F88, 0x01F80}, {0x01F89, 0x01F81}, {0x01F8A, 0x01F82},
-{0x01F8B, 0x01F83}, {0x01F8C, 0x01F84}, {0x01F8D, 0x01F85}, {0x01F8E, 0x01F86}, {0x01F8F, 0x01F87}, {0x01F98, 0x01F90},
-{0x01F99, 0x01F91}, {0x01F9A, 0x01F92}, {0x01F9B, 0x01F93}, {0x01F9C, 0x01F94}, {0x01F9D, 0x01F95}, {0x01F9E, 0x01F96},
-{0x01F9F, 0x01F97}, {0x01FA8, 0x01FA0}, {0x01FA9, 0x01FA1}, {0x01FAA, 0x01FA2}, {0x01FAB, 0x01FA3}, {0x01FAC, 0x01FA4},
-{0x01FAD, 0x01FA5}, {0x01FAE, 0x01FA6}, {0x01FAF, 0x01FA7}, {0x01FB8, 0x01FB0}, {0x01FB9, 0x01FB1}, {0x01FBA, 0x01F70},
-{0x01FBB, 0x01F71}, {0x01FBC, 0x01FB3}, {0x01FC8, 0x01F72}, {0x01FC9, 0x01F73}, {0x01FCA, 0x01F74}, {0x01FCB, 0x01F75},
-{0x01FCC, 0x01FC3}, {0x01FD8, 0x01FD0}, {0x01FD9, 0x01FD1}, {0x01FDA, 0x01F76}, {0x01FDB, 0x01F77}, {0x01FE8, 0x01FE0},
-{0x01FE9, 0x01FE1}, {0x01FEA, 0x01F7A}, {0x01FEB, 0x01F7B}, {0x01FEC, 0x01FE5}, {0x01FF8, 0x01F78}, {0x01FF9, 0x01F79},
-{0x01FFA, 0x01F7C}, {0x01FFB, 0x01F7D}, {0x01FFC, 0x01FF3}, {0x02126, 0x003C9}, {0x0212A, 0x0006B}, {0x0212B, 0x000E5},
-{0x02132, 0x0214E}, {0x02160, 0x02170}, {0x02161, 0x02171}, {0x02162, 0x02172}, {0x02163, 0x02173}, {0x02164, 0x02174},
-{0x02165, 0x02175}, {0x02166, 0x02176}, {0x02167, 0x02177}, {0x02168, 0x02178}, {0x02169, 0x02179}, {0x0216A, 0x0217A},
-{0x0216B, 0x0217B}, {0x0216C, 0x0217C}, {0x0216D, 0x0217D}, {0x0216E, 0x0217E}, {0x0216F, 0x0217F}, {0x02183, 0x02184},
-{0x024B6, 0x024D0}, {0x024B7, 0x024D1}, {0x024B8, 0x024D2}, {0x024B9, 0x024D3}, {0x024BA, 0x024D4}, {0x024BB, 0x024D5},
-{0x024BC, 0x024D6}, {0x024BD, 0x024D7}, {0x024BE, 0x024D8}, {0x024BF, 0x024D9}, {0x024C0, 0x024DA}, {0x024C1, 0x024DB},
-{0x024C2, 0x024DC}, {0x024C3, 0x024DD}, {0x024C4, 0x024DE}, {0x024C5, 0x024DF}, {0x024C6, 0x024E0}, {0x024C7, 0x024E1},
-{0x024C8, 0x024E2}, {0x024C9, 0x024E3}, {0x024CA, 0x024E4}, {0x024CB, 0x024E5}, {0x024CC, 0x024E6}, {0x024CD, 0x024E7},
-{0x024CE, 0x024E8}, {0x024CF, 0x024E9}, {0x02C00, 0x02C30}, {0x02C01, 0x02C31}, {0x02C02, 0x02C32}, {0x02C03, 0x02C33},
-{0x02C04, 0x02C34}, {0x02C05, 0x02C35}, {0x02C06, 0x02C36}, {0x02C07, 0x02C37}, {0x02C08, 0x02C38}, {0x02C09, 0x02C39},
-{0x02C0A, 0x02C3A}, {0x02C0B, 0x02C3B}, {0x02C0C, 0x02C3C}, {0x02C0D, 0x02C3D}, {0x02C0E, 0x02C3E}, {0x02C0F, 0x02C3F},
-{0x02C10, 0x02C40}, {0x02C11, 0x02C41}, {0x02C12, 0x02C42}, {0x02C13, 0x02C43}, {0x02C14, 0x02C44}, {0x02C15, 0x02C45},
-{0x02C16, 0x02C46}, {0x02C17, 0x02C47}, {0x02C18, 0x02C48}, {0x02C19, 0x02C49}, {0x02C1A, 0x02C4A}, {0x02C1B, 0x02C4B},
-{0x02C1C, 0x02C4C}, {0x02C1D, 0x02C4D}, {0x02C1E, 0x02C4E}, {0x02C1F, 0x02C4F}, {0x02C20, 0x02C50}, {0x02C21, 0x02C51},
-{0x02C22, 0x02C52}, {0x02C23, 0x02C53}, {0x02C24, 0x02C54}, {0x02C25, 0x02C55}, {0x02C26, 0x02C56}, {0x02C27, 0x02C57},
-{0x02C28, 0x02C58}, {0x02C29, 0x02C59}, {0x02C2A, 0x02C5A}, {0x02C2B, 0x02C5B}, {0x02C2C, 0x02C5C}, {0x02C2D, 0x02C5D},
-{0x02C2E, 0x02C5E}, {0x02C2F, 0x02C5F}, {0x02C60, 0x02C61}, {0x02C62, 0x0026B}, {0x02C63, 0x01D7D}, {0x02C64, 0x0027D},
-{0x02C67, 0x02C68}, {0x02C69, 0x02C6A}, {0x02C6B, 0x02C6C}, {0x02C6D, 0x00251}, {0x02C6E, 0x00271}, {0x02C6F, 0x00250},
-{0x02C70, 0x00252}, {0x02C72, 0x02C73}, {0x02C75, 0x02C76}, {0x02C7E, 0x0023F}, {0x02C7F, 0x00240}, {0x02C80, 0x02C81},
-{0x02C82, 0x02C83}, {0x02C84, 0x02C85}, {0x02C86, 0x02C87}, {0x02C88, 0x02C89}, {0x02C8A, 0x02C8B}, {0x02C8C, 0x02C8D},
-{0x02C8E, 0x02C8F}, {0x02C90, 0x02C91}, {0x02C92, 0x02C93}, {0x02C94, 0x02C95}, {0x02C96, 0x02C97}, {0x02C98, 0x02C99},
-{0x02C9A, 0x02C9B}, {0x02C9C, 0x02C9D}, {0x02C9E, 0x02C9F}, {0x02CA0, 0x02CA1}, {0x02CA2, 0x02CA3}, {0x02CA4, 0x02CA5},
-{0x02CA6, 0x02CA7}, {0x02CA8, 0x02CA9}, {0x02CAA, 0x02CAB}, {0x02CAC, 0x02CAD}, {0x02CAE, 0x02CAF}, {0x02CB0, 0x02CB1},
-{0x02CB2, 0x02CB3}, {0x02CB4, 0x02CB5}, {0x02CB6, 0x02CB7}, {0x02CB8, 0x02CB9}, {0x02CBA, 0x02CBB}, {0x02CBC, 0x02CBD},
-{0x02CBE, 0x02CBF}, {0x02CC0, 0x02CC1}, {0x02CC2, 0x02CC3}, {0x02CC4, 0x02CC5}, {0x02CC6, 0x02CC7}, {0x02CC8, 0x02CC9},
-{0x02CCA, 0x02CCB}, {0x02CCC, 0x02CCD}, {0x02CCE, 0x02CCF}, {0x02CD0, 0x02CD1}, {0x02CD2, 0x02CD3}, {0x02CD4, 0x02CD5},
-{0x02CD6, 0x02CD7}, {0x02CD8, 0x02CD9}, {0x02CDA, 0x02CDB}, {0x02CDC, 0x02CDD}, {0x02CDE, 0x02CDF}, {0x02CE0, 0x02CE1},
-{0x02CE2, 0x02CE3}, {0x02CEB, 0x02CEC}, {0x02CED, 0x02CEE}, {0x02CF2, 0x02CF3}, {0x0A640, 0x0A641}, {0x0A642, 0x0A643},
-{0x0A644, 0x0A645}, {0x0A646, 0x0A647}, {0x0A648, 0x0A649}, {0x0A64A, 0x0A64B}, {0x0A64C, 0x0A64D}, {0x0A64E, 0x0A64F},
-{0x0A650, 0x0A651}, {0x0A652, 0x0A653}, {0x0A654, 0x0A655}, {0x0A656, 0x0A657}, {0x0A658, 0x0A659}, {0x0A65A, 0x0A65B},
-{0x0A65C, 0x0A65D}, {0x0A65E, 0x0A65F}, {0x0A660, 0x0A661}, {0x0A662, 0x0A663}, {0x0A664, 0x0A665}, {0x0A666, 0x0A667},
-{0x0A668, 0x0A669}, {0x0A66A, 0x0A66B}, {0x0A66C, 0x0A66D}, {0x0A680, 0x0A681}, {0x0A682, 0x0A683}, {0x0A684, 0x0A685},
-{0x0A686, 0x0A687}, {0x0A688, 0x0A689}, {0x0A68A, 0x0A68B}, {0x0A68C, 0x0A68D}, {0x0A68E, 0x0A68F}, {0x0A690, 0x0A691},
-{0x0A692, 0x0A693}, {0x0A694, 0x0A695}, {0x0A696, 0x0A697}, {0x0A698, 0x0A699}, {0x0A69A, 0x0A69B}, {0x0A722, 0x0A723},
-{0x0A724, 0x0A725}, {0x0A726, 0x0A727}, {0x0A728, 0x0A729}, {0x0A72A, 0x0A72B}, {0x0A72C, 0x0A72D}, {0x0A72E, 0x0A72F},
-{0x0A732, 0x0A733}, {0x0A734, 0x0A735}, {0x0A736, 0x0A737}, {0x0A738, 0x0A739}, {0x0A73A, 0x0A73B}, {0x0A73C, 0x0A73D},
-{0x0A73E, 0x0A73F}, {0x0A740, 0x0A741}, {0x0A742, 0x0A743}, {0x0A744, 0x0A745}, {0x0A746, 0x0A747}, {0x0A748, 0x0A749},
-{0x0A74A, 0x0A74B}, {0x0A74C, 0x0A74D}, {0x0A74E, 0x0A74F}, {0x0A750, 0x0A751}, {0x0A752, 0x0A753}, {0x0A754, 0x0A755},
-{0x0A756, 0x0A757}, {0x0A758, 0x0A759}, {0x0A75A, 0x0A75B}, {0x0A75C, 0x0A75D}, {0x0A75E, 0x0A75F}, {0x0A760, 0x0A761},
-{0x0A762, 0x0A763}, {0x0A764, 0x0A765}, {0x0A766, 0x0A767}, {0x0A768, 0x0A769}, {0x0A76A, 0x0A76B}, {0x0A76C, 0x0A76D},
-{0x0A76E, 0x0A76F}, {0x0A779, 0x0A77A}, {0x0A77B, 0x0A77C}, {0x0A77D, 0x01D79}, {0x0A77E, 0x0A77F}, {0x0A780, 0x0A781},
-{0x0A782, 0x0A783}, {0x0A784, 0x0A785}, {0x0A786, 0x0A787}, {0x0A78B, 0x0A78C}, {0x0A78D, 0x00265}, {0x0A790, 0x0A791},
-{0x0A792, 0x0A793}, {0x0A796, 0x0A797}, {0x0A798, 0x0A799}, {0x0A79A, 0x0A79B}, {0x0A79C, 0x0A79D}, {0x0A79E, 0x0A79F},
-{0x0A7A0, 0x0A7A1}, {0x0A7A2, 0x0A7A3}, {0x0A7A4, 0x0A7A5}, {0x0A7A6, 0x0A7A7}, {0x0A7A8, 0x0A7A9}, {0x0A7AA, 0x00266},
-{0x0A7AB, 0x0025C}, {0x0A7AC, 0x00261}, {0x0A7AD, 0x0026C}, {0x0A7AE, 0x0026A}, {0x0A7B0, 0x0029E}, {0x0A7B1, 0x00287},
-{0x0A7B2, 0x0029D}, {0x0A7B3, 0x0AB53}, {0x0A7B4, 0x0A7B5}, {0x0A7B6, 0x0A7B7}, {0x0A7B8, 0x0A7B9}, {0x0A7BA, 0x0A7BB},
-{0x0A7BC, 0x0A7BD}, {0x0A7BE, 0x0A7BF}, {0x0A7C0, 0x0A7C1}, {0x0A7C2, 0x0A7C3}, {0x0A7C4, 0x0A794}, {0x0A7C5, 0x00282},
-{0x0A7C6, 0x01D8E}, {0x0A7C7, 0x0A7C8}, {0x0A7C9, 0x0A7CA}, {0x0A7D0, 0x0A7D1}, {0x0A7D6, 0x0A7D7}, {0x0A7D8, 0x0A7D9},
-{0x0A7F5, 0x0A7F6}, {0x0FF21, 0x0FF41}, {0x0FF22, 0x0FF42}, {0x0FF23, 0x0FF43}, {0x0FF24, 0x0FF44}, {0x0FF25, 0x0FF45},
-{0x0FF26, 0x0FF46}, {0x0FF27, 0x0FF47}, {0x0FF28, 0x0FF48}, {0x0FF29, 0x0FF49}, {0x0FF2A, 0x0FF4A}, {0x0FF2B, 0x0FF4B},
-{0x0FF2C, 0x0FF4C}, {0x0FF2D, 0x0FF4D}, {0x0FF2E, 0x0FF4E}, {0x0FF2F, 0x0FF4F}, {0x0FF30, 0x0FF50}, {0x0FF31, 0x0FF51},
-{0x0FF32, 0x0FF52}, {0x0FF33, 0x0FF53}, {0x0FF34, 0x0FF54}, {0x0FF35, 0x0FF55}, {0x0FF36, 0x0FF56}, {0x0FF37, 0x0FF57},
-{0x0FF38, 0x0FF58}, {0x0FF39, 0x0FF59}, {0x0FF3A, 0x0FF5A}, {0x10400, 0x10428}, {0x10401, 0x10429}, {0x10402, 0x1042A},
-{0x10403, 0x1042B}, {0x10404, 0x1042C}, {0x10405, 0x1042D}, {0x10406, 0x1042E}, {0x10407, 0x1042F}, {0x10408, 0x10430},
-{0x10409, 0x10431}, {0x1040A, 0x10432}, {0x1040B, 0x10433}, {0x1040C, 0x10434}, {0x1040D, 0x10435}, {0x1040E, 0x10436},
-{0x1040F, 0x10437}, {0x10410, 0x10438}, {0x10411, 0x10439}, {0x10412, 0x1043A}, {0x10413, 0x1043B}, {0x10414, 0x1043C},
-{0x10415, 0x1043D}, {0x10416, 0x1043E}, {0x10417, 0x1043F}, {0x10418, 0x10440}, {0x10419, 0x10441}, {0x1041A, 0x10442},
-{0x1041B, 0x10443}, {0x1041C, 0x10444}, {0x1041D, 0x10445}, {0x1041E, 0x10446}, {0x1041F, 0x10447}, {0x10420, 0x10448},
-{0x10421, 0x10449}, {0x10422, 0x1044A}, {0x10423, 0x1044B}, {0x10424, 0x1044C}, {0x10425, 0x1044D}, {0x10426, 0x1044E},
-{0x10427, 0x1044F}, {0x104B0, 0x104D8}, {0x104B1, 0x104D9}, {0x104B2, 0x104DA}, {0x104B3, 0x104DB}, {0x104B4, 0x104DC},
-{0x104B5, 0x104DD}, {0x104B6, 0x104DE}, {0x104B7, 0x104DF}, {0x104B8, 0x104E0}, {0x104B9, 0x104E1}, {0x104BA, 0x104E2},
-{0x104BB, 0x104E3}, {0x104BC, 0x104E4}, {0x104BD, 0x104E5}, {0x104BE, 0x104E6}, {0x104BF, 0x104E7}, {0x104C0, 0x104E8},
-{0x104C1, 0x104E9}, {0x104C2, 0x104EA}, {0x104C3, 0x104EB}, {0x104C4, 0x104EC}, {0x104C5, 0x104ED}, {0x104C6, 0x104EE},
-{0x104C7, 0x104EF}, {0x104C8, 0x104F0}, {0x104C9, 0x104F1}, {0x104CA, 0x104F2}, {0x104CB, 0x104F3}, {0x104CC, 0x104F4},
-{0x104CD, 0x104F5}, {0x104CE, 0x104F6}, {0x104CF, 0x104F7}, {0x104D0, 0x104F8}, {0x104D1, 0x104F9}, {0x104D2, 0x104FA},
-{0x104D3, 0x104FB}, {0x10570, 0x10597}, {0x10571, 0x10598}, {0x10572, 0x10599}, {0x10573, 0x1059A}, {0x10574, 0x1059B},
-{0x10575, 0x1059C}, {0x10576, 0x1059D}, {0x10577, 0x1059E}, {0x10578, 0x1059F}, {0x10579, 0x105A0}, {0x1057A, 0x105A1},
-{0x1057C, 0x105A3}, {0x1057D, 0x105A4}, {0x1057E, 0x105A5}, {0x1057F, 0x105A6}, {0x10580, 0x105A7}, {0x10581, 0x105A8},
-{0x10582, 0x105A9}, {0x10583, 0x105AA}, {0x10584, 0x105AB}, {0x10585, 0x105AC}, {0x10586, 0x105AD}, {0x10587, 0x105AE},
-{0x10588, 0x105AF}, {0x10589, 0x105B0}, {0x1058A, 0x105B1}, {0x1058C, 0x105B3}, {0x1058D, 0x105B4}, {0x1058E, 0x105B5},
-{0x1058F, 0x105B6}, {0x10590, 0x105B7}, {0x10591, 0x105B8}, {0x10592, 0x105B9}, {0x10594, 0x105BB}, {0x10595, 0x105BC},
-{0x10C80, 0x10CC0}, {0x10C81, 0x10CC1}, {0x10C82, 0x10CC2}, {0x10C83, 0x10CC3}, {0x10C84, 0x10CC4}, {0x10C85, 0x10CC5},
-{0x10C86, 0x10CC6}, {0x10C87, 0x10CC7}, {0x10C88, 0x10CC8}, {0x10C89, 0x10CC9}, {0x10C8A, 0x10CCA}, {0x10C8B, 0x10CCB},
-{0x10C8C, 0x10CCC}, {0x10C8D, 0x10CCD}, {0x10C8E, 0x10CCE}, {0x10C8F, 0x10CCF}, {0x10C90, 0x10CD0}, {0x10C91, 0x10CD1},
-{0x10C92, 0x10CD2}, {0x10C93, 0x10CD3}, {0x10C94, 0x10CD4}, {0x10C95, 0x10CD5}, {0x10C96, 0x10CD6}, {0x10C97, 0x10CD7},
-{0x10C98, 0x10CD8}, {0x10C99, 0x10CD9}, {0x10C9A, 0x10CDA}, {0x10C9B, 0x10CDB}, {0x10C9C, 0x10CDC}, {0x10C9D, 0x10CDD},
-{0x10C9E, 0x10CDE}, {0x10C9F, 0x10CDF}, {0x10CA0, 0x10CE0}, {0x10CA1, 0x10CE1}, {0x10CA2, 0x10CE2}, {0x10CA3, 0x10CE3},
-{0x10CA4, 0x10CE4}, {0x10CA5, 0x10CE5}, {0x10CA6, 0x10CE6}, {0x10CA7, 0x10CE7}, {0x10CA8, 0x10CE8}, {0x10CA9, 0x10CE9},
-{0x10CAA, 0x10CEA}, {0x10CAB, 0x10CEB}, {0x10CAC, 0x10CEC}, {0x10CAD, 0x10CED}, {0x10CAE, 0x10CEE}, {0x10CAF, 0x10CEF},
-{0x10CB0, 0x10CF0}, {0x10CB1, 0x10CF1}, {0x10CB2, 0x10CF2}, {0x118A0, 0x118C0}, {0x118A1, 0x118C1}, {0x118A2, 0x118C2},
-{0x118A3, 0x118C3}, {0x118A4, 0x118C4}, {0x118A5, 0x118C5}, {0x118A6, 0x118C6}, {0x118A7, 0x118C7}, {0x118A8, 0x118C8},
-{0x118A9, 0x118C9}, {0x118AA, 0x118CA}, {0x118AB, 0x118CB}, {0x118AC, 0x118CC}, {0x118AD, 0x118CD}, {0x118AE, 0x118CE},
-{0x118AF, 0x118CF}, {0x118B0, 0x118D0}, {0x118B1, 0x118D1}, {0x118B2, 0x118D2}, {0x118B3, 0x118D3}, {0x118B4, 0x118D4},
-{0x118B5, 0x118D5}, {0x118B6, 0x118D6}, {0x118B7, 0x118D7}, {0x118B8, 0x118D8}, {0x118B9, 0x118D9}, {0x118BA, 0x118DA},
-{0x118BB, 0x118DB}, {0x118BC, 0x118DC}, {0x118BD, 0x118DD}, {0x118BE, 0x118DE}, {0x118BF, 0x118DF}, {0x16E40, 0x16E60},
-{0x16E41, 0x16E61}, {0x16E42, 0x16E62}, {0x16E43, 0x16E63}, {0x16E44, 0x16E64}, {0x16E45, 0x16E65}, {0x16E46, 0x16E66},
-{0x16E47, 0x16E67}, {0x16E48, 0x16E68}, {0x16E49, 0x16E69}, {0x16E4A, 0x16E6A}, {0x16E4B, 0x16E6B}, {0x16E4C, 0x16E6C},
-{0x16E4D, 0x16E6D}, {0x16E4E, 0x16E6E}, {0x16E4F, 0x16E6F}, {0x16E50, 0x16E70}, {0x16E51, 0x16E71}, {0x16E52, 0x16E72},
-{0x16E53, 0x16E73}, {0x16E54, 0x16E74}, {0x16E55, 0x16E75}, {0x16E56, 0x16E76}, {0x16E57, 0x16E77}, {0x16E58, 0x16E78},
-{0x16E59, 0x16E79}, {0x16E5A, 0x16E7A}, {0x16E5B, 0x16E7B}, {0x16E5C, 0x16E7C}, {0x16E5D, 0x16E7D}, {0x16E5E, 0x16E7E},
-{0x16E5F, 0x16E7F}, {0x1E900, 0x1E922}, {0x1E901, 0x1E923}, {0x1E902, 0x1E924}, {0x1E903, 0x1E925}, {0x1E904, 0x1E926},
-{0x1E905, 0x1E927}, {0x1E906, 0x1E928}, {0x1E907, 0x1E929}, {0x1E908, 0x1E92A}, {0x1E909, 0x1E92B}, {0x1E90A, 0x1E92C},
-{0x1E90B, 0x1E92D}, {0x1E90C, 0x1E92E}, {0x1E90D, 0x1E92F}, {0x1E90E, 0x1E930}, {0x1E90F, 0x1E931}, {0x1E910, 0x1E932},
-{0x1E911, 0x1E933}, {0x1E912, 0x1E934}, {0x1E913, 0x1E935}, {0x1E914, 0x1E936}, {0x1E915, 0x1E937}, {0x1E916, 0x1E938},
-{0x1E917, 0x1E939}, {0x1E918, 0x1E93A}, {0x1E919, 0x1E93B}, {0x1E91A, 0x1E93C}, {0x1E91B, 0x1E93D}, {0x1E91C, 0x1E93E},
-{0x1E91D, 0x1E93F}, {0x1E91E, 0x1E940}, {0x1E91F, 0x1E941}, {0x1E920, 0x1E942}, {0x1E921, 0x1E943},
-};
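The block removed above is the tail of an uppercase-to-lowercase codepoint table: each pair maps an uppercase codepoint to its lowercase form (e.g. {0x00410, 0x00430} is Cyrillic А to а), and `unicode_tolower` is the consumer of data shaped like this. A minimal sketch of that kind of lookup, assuming a table sorted by the first element of each pair; the names here are illustrative, not the file's real identifiers:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // hypothetical excerpt of the sorted (uppercase, lowercase) pairs above
    static const std::vector<std::pair<uint32_t, uint32_t>> map_lowercase = {
        {0x00391, 0x003B1}, // GREEK CAPITAL ALPHA -> small alpha
        {0x00410, 0x00430}, // CYRILLIC CAPITAL A  -> small a
        {0x0FF21, 0x0FF41}, // FULLWIDTH LATIN A   -> fullwidth small a
    };

    static uint32_t tolower_cpt(uint32_t cpt) {
        // binary search on the first element of each pair
        auto it = std::lower_bound(map_lowercase.begin(), map_lowercase.end(), cpt,
            [](const std::pair<uint32_t, uint32_t> & p, uint32_t c) { return p.first < c; });
        return (it != map_lowercase.end() && it->first == cpt) ? it->second : cpt;
    }

    int main() {
        assert(tolower_cpt(0x00410) == 0x00430); // mapped
        assert(tolower_cpt(0x00430) == 0x00430); // already lowercase: unchanged
    }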
diff --git a/unicode-data.h b/unicode-data.h
index cb9dd8aa5..3cccf2068 100644
--- a/unicode-data.h
+++ b/unicode-data.h
@@ -5,8 +5,9 @@
#include <utility>
#include <vector>
-extern const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_digit;
+extern const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_number;
extern const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_letter;
+extern const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_separator;
extern const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_whitespace;
extern const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_accent_mark;
extern const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_punctuation;
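Each `unicode_ranges_*` table holds inclusive [first, last] codepoint ranges for one category. A small sketch of flattening such ranges into a per-codepoint map, mirroring the loops in `unicode_cpt_type_map()` further down; the range contents here are illustrative:

    #include <cstdint>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    int main() {
        // illustrative stand-in for one unicode_ranges_* table
        const std::vector<std::pair<uint32_t, uint32_t>> ranges_number = {
            {0x0030, 0x0039}, // ASCII digits 0-9
            {0x0660, 0x0669}, // Arabic-Indic digits
        };

        std::unordered_map<uint32_t, int> cpt_types;
        for (auto p : ranges_number) {
            for (auto i = p.first; i <= p.second; ++i) {
                cpt_types[i] = 1; // CODEPOINT_TYPE_NUMBER
            }
        }
        // cpt_types.count(0x37) == 1 ('7'), cpt_types.count(0x78) == 0 ('x')
    }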
diff --git a/unicode.cpp b/unicode.cpp
index f2ccda05f..ca03c49d3 100644
--- a/unicode.cpp
+++ b/unicode.cpp
@@ -9,6 +9,7 @@
#include <regex>
#include <stdexcept>
#include <string>
+#include <unordered_set>
#include <unordered_map>
#include <utility>
#include <vector>
@@ -110,28 +111,28 @@ static uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset)
static std::unordered_map<uint32_t, int> unicode_cpt_type_map() {
std::unordered_map<uint32_t, int> cpt_types;
- for (auto p : unicode_ranges_digit) {
- for (auto i = p.first; i <= p.second; ++ i) {
- cpt_types[i] = CODEPOINT_TYPE_DIGIT;
+ for (auto p : unicode_ranges_number) {
+ for (auto i = p.first; i <= p.second; ++i) {
+ cpt_types[i] = CODEPOINT_TYPE_NUMBER;
}
}
for (auto p : unicode_ranges_letter) {
- for (auto i = p.first; i <= p.second; ++ i) {
+ for (auto i = p.first; i <= p.second; ++i) {
cpt_types[i] = CODEPOINT_TYPE_LETTER;
}
}
- for (auto p : unicode_ranges_whitespace) {
- for (auto i = p.first; i <= p.second; ++ i) {
- cpt_types[i] = CODEPOINT_TYPE_WHITESPACE;
+ for (auto p : unicode_ranges_separator) {
+ for (auto i = p.first; i <= p.second; ++i) {
+ cpt_types[i] = CODEPOINT_TYPE_SEPARATOR;
}
}
for (auto p : unicode_ranges_accent_mark) {
- for (auto i = p.first; i <= p.second; ++ i) {
+ for (auto i = p.first; i <= p.second; ++i) {
cpt_types[i] = CODEPOINT_TYPE_ACCENT_MARK;
}
}
for (auto p : unicode_ranges_punctuation) {
- for (auto i = p.first; i <= p.second; ++ i) {
+ for (auto i = p.first; i <= p.second; ++i) {
cpt_types[i] = CODEPOINT_TYPE_PUNCTUATION;
}
}
@@ -141,7 +142,7 @@ static std::unordered_map unicode_cpt_type_map() {
}
}
for (auto p : unicode_ranges_control) {
- for (auto i = p.first; i <= p.second; ++ i) {
+ for (auto i = p.first; i <= p.second; ++i) {
cpt_types[i] = CODEPOINT_TYPE_CONTROL;
}
}
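Note the semantic split introduced here: the type map now classifies Unicode separators (the Z* categories), while whitespace, which also covers control characters such as \n and \t, is answered by the dedicated `unicode_cpt_is_whitespace()` added further down. A sketch of the distinction, assuming the regenerated tables follow the Unicode general categories:

    #include <cassert>
    #include "unicode.h"

    int main() {
        // U+0020 SPACE: a separator (Zs) and also whitespace
        assert(unicode_cpt_type(0x0020) == CODEPOINT_TYPE_SEPARATOR);
        assert(unicode_cpt_is_whitespace(0x0020));
        // U+000A LINE FEED: a control character, yet still whitespace
        assert(unicode_cpt_type(0x000A) == CODEPOINT_TYPE_CONTROL);
        assert(unicode_cpt_is_whitespace(0x000A));
    }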
@@ -224,138 +225,256 @@ static std::vector<size_t> unicode_regex_split_custom_gpt2(const std::string & t
std::vector<size_t> bpe_offsets; // store the offset of each word
bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size
+ const auto cpts = unicode_cpts_from_utf8(text);
+
size_t start = 0;
+ for (auto offset : offsets) {
+ const size_t offset_ini = start;
+ const size_t offset_end = start + offset;
+ assert(offset_end <= cpts.size());
+ start = offset_end;
+
+ auto _get_cpt = [&] (const size_t pos) -> char32_t {
+ return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : 0;
+ };
+
+ auto _get_cpt_type = [&] (const size_t pos) -> int {
+ return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_type(cpts[pos]) : CODEPOINT_TYPE_UNIDENTIFIED;
+ };
+
+ size_t _prev_end = offset_ini;
+ auto _add_token = [&] (const size_t end) -> size_t {
+ assert(_prev_end <= end && end <= offset_end);
+ size_t len = end - _prev_end;
+ if (len > 0) {
+ bpe_offsets.push_back(len);
+ }
+ _prev_end = end;
+ //if (len > 0) {
+ // std::string s = "";
+ // for(size_t p = end-len; p < end; p++)
+ // s += unicode_cpt_to_utf8(cpts[p]);
+ // printf(">>> '%s'\n", s.c_str());
+ //}
+ return len;
+ };
+
+ for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) {
+ const char32_t cpt = _get_cpt(pos);
+ const int cpt_type = _get_cpt_type(pos);
+
+ // regex: 's|'t|'re|'ve|'m|'ll|'d
+ if (cpt == '\'' && pos+1 < offset_end) {
+ char32_t cpt_next = _get_cpt(pos+1);
+ if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') {
+ pos += _add_token(pos+2);
+ continue;
+ }
+ if (pos+2 < offset_end) {
+ char32_t cpt_next_next = _get_cpt(pos+2);
+ if ((cpt_next == 'r' && cpt_next_next == 'e') ||
+ (cpt_next == 'v' && cpt_next_next == 'e') ||
+ (cpt_next == 'l' && cpt_next_next == 'l')) {
+ pos += _add_token(pos+3);
+ continue;
+ }
+ }
+ }
+
+ char32_t cpt2 = (cpt == ' ' ? _get_cpt(pos+1) : cpt);
+ int cpt2_type = (cpt == ' ' ? _get_cpt_type(pos+1) : cpt_type);
+ // regex: ?\p{L}+
+ if (cpt2_type == CODEPOINT_TYPE_LETTER) {
+ pos += (cpt == ' ');
+ while (cpt2_type == CODEPOINT_TYPE_LETTER) {
+ cpt2_type = _get_cpt_type(++pos);
+ }
+ _add_token(pos);
+ continue;
+ }
+ // regex: ?\p{N}+
+ if (cpt2_type == CODEPOINT_TYPE_NUMBER) {
+ pos += (cpt == ' ');
+ while (cpt2_type == CODEPOINT_TYPE_NUMBER) {
+ cpt2_type = _get_cpt_type(++pos);
+ }
+ _add_token(pos);
+ continue;
+ }
+ // regex: ?[^\s\p{L}\p{N}]+
+ if (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
+ pos += (cpt == ' ');
+ while (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
+ cpt2_type = _get_cpt_type(++pos);
+ cpt2 = _get_cpt(pos);
+ }
+ _add_token(pos);
+ continue;
+ }
+
+ size_t num_whitespaces = 0;
+ while (unicode_cpt_is_whitespace(_get_cpt(pos+num_whitespaces))) {
+ num_whitespaces++;
+ }
+
+ // regex: \s+(?!\S)
+ if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != 0) {
+ pos += num_whitespaces - 1;
+ _add_token(pos);
+ continue;
+ }
+
+ // regex: \s+
+ if (num_whitespaces > 0) {
+ pos += num_whitespaces;
+ _add_token(pos);
+ continue;
+ }
+
+ // no matches
+ _add_token(++pos);
+ }
+ }
+
+ return bpe_offsets;
+}
+
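Both custom splitters emit `bpe_offsets` as chunk lengths measured in codepoints, chained back-to-back across the outer `offsets` spans. A sketch of turning such lengths back into substrings; the small UTF-8 encoder stands in for what `unicode_cpt_to_utf8` does in the real file:

    #include <cstdint>
    #include <string>
    #include <vector>

    // minimal UTF-8 encoder for one codepoint (stand-in for unicode_cpt_to_utf8)
    static std::string cpt_to_utf8(uint32_t c) {
        std::string s;
        if      (c < 0x80)    { s += (char) c; }
        else if (c < 0x800)   { s += (char)(0xC0 | (c >>  6)); s += (char)(0x80 | (c & 0x3F)); }
        else if (c < 0x10000) { s += (char)(0xE0 | (c >> 12)); s += (char)(0x80 | ((c >>  6) & 0x3F)); s += (char)(0x80 | (c & 0x3F)); }
        else                  { s += (char)(0xF0 | (c >> 18)); s += (char)(0x80 | ((c >> 12) & 0x3F)); s += (char)(0x80 | ((c >> 6) & 0x3F)); s += (char)(0x80 | (c & 0x3F)); }
        return s;
    }

    // reconstruct the chunk strings from codepoints + consecutive lengths
    static std::vector<std::string> chunks_from_offsets(
            const std::vector<uint32_t> & cpts, const std::vector<size_t> & bpe_offsets) {
        std::vector<std::string> chunks;
        size_t start = 0;
        for (size_t len : bpe_offsets) {
            std::string chunk;
            for (size_t p = start; p < start + len; ++p) {
                chunk += cpt_to_utf8(cpts[p]);
            }
            chunks.push_back(std::move(chunk));
            start += len;
        }
        return chunks;
    }

    int main() {
        // "Hi 42" split as "Hi" / " 42" -> lengths {2, 3}
        std::vector<uint32_t> cpts = {'H', 'i', ' ', '4', '2'};
        auto chunks = chunks_from_offsets(cpts, {2, 3});
        // chunks == {"Hi", " 42"}
    }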
+// LLAMA3 system regex: "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"
+static std::vector<size_t> unicode_regex_split_custom_llama3(const std::string & text, const std::vector<size_t> & offsets) {
+ std::vector<size_t> bpe_offsets; // store the offset of each word
+ bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size
const auto cpts = unicode_cpts_from_utf8(text);
+ size_t start = 0;
for (auto offset : offsets) {
- std::string token;
+ const size_t offset_ini = start;
+ const size_t offset_end = start + offset;
+ assert(offset_end <= cpts.size());
+ start = offset_end;
- bool collecting_numeric = false;
- bool collecting_letter = false;
- bool collecting_special = false;
- bool collecting_whitespace_lookahead = false;
- bool collecting = false;
+ auto _get_cpt = [&] (const size_t pos) -> char32_t {
+ return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : 0;
+ };
- std::vector<std::string> text_utf;
- text_utf.reserve(offset);
+ auto _get_cpt_type = [&] (const size_t pos) -> int {
+ return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_type(cpts[pos]) : CODEPOINT_TYPE_UNIDENTIFIED;
+ };
- for (size_t i = start; i < start + offset; ++i) {
- text_utf.emplace_back(unicode_cpt_to_utf8(cpts[i]));
- }
+ size_t _prev_end = offset_ini;
+ auto _add_token = [&] (const size_t end) -> size_t {
+ assert(_prev_end <= end && end <= offset_end);
+ size_t len = end - _prev_end;
+ if (len > 0) {
+ bpe_offsets.push_back(len);
+ }
+ _prev_end = end;
+ //if (len > 0) {
+ // std::string s = "";
+ // for(size_t p = end-len; p < end; p++)
+ // s += unicode_cpt_to_utf8(cpts[p]);
+ // printf(">>> '%s'\n", s.c_str());
+ //}
+ return len;
+ };
- for (int i = 0; i < (int)text_utf.size(); i++) {
- const std::string & utf_char = text_utf[i];
- bool split_condition = false;
- int bytes_remain = text_utf.size() - i;
+ for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) {
+ const char32_t cpt = _get_cpt(pos);
+ const int cpt_type = _get_cpt_type(pos);
- // forward backward lookups
- const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : "";
- const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? text_utf[i + 2] : "";
-
- // handling contractions
- if (!split_condition && bytes_remain >= 2) {
- // 's|'t|'m|'d
- if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) {
- split_condition = true;
- }
- if (split_condition) {
- if (token.size()) {
- bpe_offsets.emplace_back(unicode_cpts_from_utf8(token).size());
- }
- token = utf_char + utf_char_next;
- bpe_offsets.emplace_back(unicode_cpts_from_utf8(token).size());
- token = "";
- i++;
+ // regex: (?i:'s|'t|'re|'ve|'m|'ll|'d) // case insensitive
+ if (cpt == '\'' && pos+1 < offset_end) {
+ char32_t cpt_next = unicode_tolower(_get_cpt(pos+1));
+ if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') {
+ pos += _add_token(pos+2);
continue;
}
- }
- if (!split_condition && bytes_remain >= 3) {
- // 're|'ve|'ll
- if (utf_char == "\'" && (
- (utf_char_next == "r" && utf_char_next_next == "e") ||
- (utf_char_next == "v" && utf_char_next_next == "e") ||
- (utf_char_next == "l" && utf_char_next_next == "l"))
- ) {
- split_condition = true;
- }
- if (split_condition) {
- // current token + next token can be defined
- if (token.size()) {
- bpe_offsets.emplace_back(unicode_cpts_from_utf8(token).size());
+ if (pos+2 < offset_end) {
+ char32_t cpt_next_next = unicode_tolower(_get_cpt(pos+2));
+ if ((cpt_next == 'r' && cpt_next_next == 'e') ||
+ (cpt_next == 'v' && cpt_next_next == 'e') ||
+ (cpt_next == 'l' && cpt_next_next == 'l')) {
+ pos += _add_token(pos+3);
+ continue;
}
- token = utf_char;
- token += utf_char_next;
- token += utf_char_next_next;
+ }
+ }
- bpe_offsets.emplace_back(unicode_cpts_from_utf8(token).size());
- token = "";
- i += 2;
+ // regex: [^\r\n\p{L}\p{N}]?\p{L}+ //####FIXME: the first \p{L} is correct?
+ if (cpt != '\r' && cpt != '\n' && /*cpt_type != CODEPOINT_TYPE_LETTER &&*/ cpt_type != CODEPOINT_TYPE_NUMBER) {
+ if (cpt_type == CODEPOINT_TYPE_LETTER || _get_cpt_type(pos+1) == CODEPOINT_TYPE_LETTER) { // one or more letters
+ pos++;
+ while (_get_cpt_type(pos) == CODEPOINT_TYPE_LETTER) {
+ pos++;
+ }
+ _add_token(pos);
continue;
}
}
- if (!split_condition && !collecting) {
- if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_LETTER || (token.empty() && utf_char == " " && unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) {
- collecting_letter = true;
- collecting = true;
- }
- else if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_DIGIT || (token.empty() && utf_char == " " && unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
- collecting_numeric = true;
- collecting = true;
- }
- else if (
- ((unicode_cpt_type(utf_char) != CODEPOINT_TYPE_LETTER && unicode_cpt_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (unicode_cpt_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) ||
- (token.empty() && utf_char == " " && unicode_cpt_type(utf_char_next) != CODEPOINT_TYPE_LETTER && unicode_cpt_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && unicode_cpt_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE)
- ) {
- collecting_special = true;
- collecting = true;
- }
- else if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) {
- collecting_whitespace_lookahead = true;
- collecting = true;
- }
- else if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) {
- split_condition = true;
- }
- }
- else if (!split_condition && collecting) {
- if (collecting_letter && unicode_cpt_type(utf_char) != CODEPOINT_TYPE_LETTER) {
- split_condition = true;
- }
- else if (collecting_numeric && unicode_cpt_type(utf_char) != CODEPOINT_TYPE_DIGIT) {
- split_condition = true;
- }
- else if (collecting_special && (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_LETTER || unicode_cpt_type(utf_char) == CODEPOINT_TYPE_DIGIT || unicode_cpt_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) {
- split_condition = true;
- }
- else if (collecting_whitespace_lookahead && (unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_LETTER || unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
- split_condition = true;
+ // regex: \p{N}{1,3}
+ if (cpt_type == CODEPOINT_TYPE_NUMBER) {
+ size_t ini = pos;
+ while (_get_cpt_type(pos) == CODEPOINT_TYPE_NUMBER) {
+ if (++pos - ini >= 3 ) {
+ _add_token(pos);
+ ini = pos;
+ }
}
+ _add_token(pos);
+ continue;
}
- if (utf_char_next == "") {
- split_condition = true; // final
- token += utf_char;
+ // regex: ?[^\s\p{L}\p{N}]+[\r\n]*
+ char32_t cpt2 = (cpt == ' ' ? _get_cpt(pos+1) : cpt);
+ int cpt2_type = (cpt == ' ' ? _get_cpt_type(pos+1) : cpt_type);
+ if (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
+ pos += (cpt == ' ');
+ while (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
+ cpt2_type = _get_cpt_type(++pos);
+ cpt2 = _get_cpt(pos);
+ }
+ while (cpt2 == '\r' || cpt2 == '\n') {
+ cpt2 = _get_cpt(++pos);
+ }
+ _add_token(pos);
+ continue;
}
- if (split_condition) {
- if (token.size()) {
- bpe_offsets.emplace_back(unicode_cpts_from_utf8(token).size());
+ size_t num_whitespaces = 0;
+ size_t last_end_r_or_n = 0;
+ while (unicode_cpt_is_whitespace(_get_cpt(pos+num_whitespaces))) {
+ char32_t cpt2 = _get_cpt(pos+num_whitespaces);
+ if (cpt2 == '\r' || cpt2 == '\n') {
+ last_end_r_or_n = pos + num_whitespaces + 1;
}
- token = utf_char;
- collecting = false;
- collecting_letter = false;
- collecting_numeric = false;
- collecting_special = false;
- collecting_whitespace_lookahead = false;
+ num_whitespaces++;
}
- else {
- token += utf_char;
+
+ // regex: \s*[\r\n]+
+ if (last_end_r_or_n > 0) {
+ pos = last_end_r_or_n;
+ _add_token(pos);
+ continue;
}
+
+ // regex: \s+(?!\S)
+ if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != 0) {
+ pos += num_whitespaces - 1;
+ _add_token(pos);
+ continue;
+ }
+
+ // regex: \s+
+ if (num_whitespaces > 0) {
+ pos += num_whitespaces;
+ _add_token(pos);
+ continue;
+ }
+
+ // no matches
+ _add_token(++pos);
}
-
- start += offset;
}
return bpe_offsets;
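The `\p{N}{1,3}` branch above flushes a token every third digit, so long digit runs split left-to-right. The same loop in isolation, reduced to ASCII digits:

    #include <cctype>
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
        const std::string text = "1234567"; // expect chunk lengths 3, 3, 1
        std::vector<size_t> lens;
        size_t pos = 0;
        size_t ini = pos;
        while (pos < text.size() && isdigit((unsigned char) text[pos])) {
            if (++pos - ini >= 3) { // close a chunk after three digits
                lens.push_back(pos - ini);
                ini = pos;
            }
        }
        if (pos > ini) {
            lens.push_back(pos - ini); // trailing chunk of one or two digits
        }
        for (size_t len : lens) {
            printf("%zu ", len); // prints: 3 3 1
        }
    }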
@@ -424,14 +543,14 @@ static std::vector<size_t> unicode_regex_split_stl(const std::string & text, con
static std::vector<size_t> unicode_regex_split_custom(const std::string & text, const std::string & regex_expr, const std::vector<size_t> & offsets) {
std::vector<size_t> bpe_offsets;
- (void)(text);
- (void)(regex_expr);
- (void)(offsets);
- // TODO: this implementation is actually wrong, uncomment and run:
- // make -j && ./bin/test-tokenizer-0 ../models/ggml-vocab-gpt-2.gguf
- //if (regex_expr == "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)") {
- // bpe_offsets = unicode_regex_split_custom_gpt2(text, offsets);
- //}
+ if (regex_expr == "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)") {
+ bpe_offsets = unicode_regex_split_custom_gpt2(text, offsets);
+ } else if (
+ regex_expr == "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" ||
+ regex_expr == "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+") {
+
+ bpe_offsets = unicode_regex_split_custom_llama3(text, offsets);
+ }
return bpe_offsets;
}
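The dispatcher takes the fast path only on a byte-for-byte match of the vocab's pretokenizer regex; any other pattern yields an empty vector, which the caller treats as "no custom implementation, fall back to std::regex / std::wregex". A self-contained sketch of that dispatch shape, with toy splitters standing in for the real static functions:

    #include <string>
    #include <vector>

    // toy stand-in so the dispatch shape below is self-contained
    static std::vector<size_t> split_gpt2(const std::string &, const std::vector<size_t> & o) { return o; }

    static std::vector<size_t> split_custom(const std::string & text,
                                            const std::string & regex_expr,
                                            const std::vector<size_t> & offsets) {
        std::vector<size_t> bpe_offsets;
        // exact byte-for-byte comparison against the known vocab pattern,
        // not regex equivalence
        if (regex_expr == "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)") {
            bpe_offsets = split_gpt2(text, offsets);
        }
        return bpe_offsets; // empty -> caller falls back to std::regex / std::wregex
    }

    int main() {
        auto hit  = split_custom("Hello", "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", {5});
        auto miss = split_custom("Hello", "[a-z]+", {5}); // unrecognized: empty result
        (void) hit; (void) miss;
    }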
@@ -506,6 +625,19 @@ int unicode_cpt_type(const std::string & utf8) {
return unicode_cpt_type(unicode_cpt_from_utf8(utf8, offset));
}
+bool unicode_cpt_is_whitespace(uint32_t cp) {
+ static const std::unordered_set<uint32_t> is_whitespace = [] {
+ std::unordered_set<uint32_t> is_whitespace;
+ for (auto p : unicode_ranges_whitespace) {
+ for (auto i = p.first; i <= p.second; ++i) {
+ is_whitespace.insert(i);
+ }
+ }
+ return is_whitespace;
+ }();
+ return (bool)is_whitespace.count(cp);
+}
+
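The function-local `static` initialized by an immediately-invoked lambda builds the flattened whitespace set exactly once, on first use, and C++11 magic statics make that initialization thread-safe. The same idiom in isolation:

    #include <cstdint>
    #include <unordered_set>

    bool is_ascii_vowel(uint32_t cpt) {
        // built once, even when first called from multiple threads
        static const std::unordered_set<uint32_t> vowels = [] {
            std::unordered_set<uint32_t> s;
            for (char v : {'a', 'e', 'i', 'o', 'u'}) {
                s.insert((uint32_t) v);
            }
            return s;
        }();
        return vowels.count(cpt) > 0;
    }

    int main() {
        return is_ascii_vowel('e') ? 0 : 1;
    }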
std::string unicode_byte_to_utf8(uint8_t byte) {
static std::unordered_map<uint8_t, std::string> map = unicode_byte_to_utf8_map();
return map.at(byte);
@@ -524,19 +656,19 @@ char32_t unicode_tolower(char32_t cp) {
std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) {
// unicode categories
static const std::map<std::string, int> k_ucat_enum = {
- { "\\p{N}", CODEPOINT_TYPE_DIGIT },
+ { "\\p{N}", CODEPOINT_TYPE_NUMBER },
{ "\\p{L}", CODEPOINT_TYPE_LETTER },
{ "\\p{P}", CODEPOINT_TYPE_PUNCTUATION },
};
static const std::map<int, int> k_ucat_cpt = {
- { CODEPOINT_TYPE_DIGIT, 0xD1 },
+ { CODEPOINT_TYPE_NUMBER, 0xD1 },
{ CODEPOINT_TYPE_LETTER, 0xD2 },
{ CODEPOINT_TYPE_PUNCTUATION, 0xD3 },
};
static const std::map<int, std::string> k_ucat_map = {
- { CODEPOINT_TYPE_DIGIT, "\x30-\x39" }, // 0-9
+ { CODEPOINT_TYPE_NUMBER, "\x30-\x39" }, // 0-9
{ CODEPOINT_TYPE_LETTER, "\x41-\x5A\x61-\x7A" }, // A-Za-z
{ CODEPOINT_TYPE_PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\}
};
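These three tables drive a fallback path for regex engines without `\p{...}` support: codepoints of a tracked category are collapsed to the single marker byte from k_ucat_cpt (0xD1 numbers, 0xD2 letters, 0xD3 punctuation), and the `\p{...}` class in the pattern is rewritten as a plain bracket expression combining the ASCII range from k_ucat_map with that marker byte. A hedged sketch of the resulting match for `\p{N}` (an inference from the tables above, not a quote of the surrounding function):

    #include <cassert>
    #include <regex>
    #include <string>

    int main() {
        // "\p{N}" rewritten as a byte class: ASCII digits plus the 0xD1
        // marker that non-ASCII number codepoints collapse to
        const std::regex re("[0-9\xD1]+");
        // collapsed text: ASCII bytes kept as-is, U+0660 (an Arabic-Indic
        // digit, multi-byte in UTF-8) replaced by the single byte 0xD1
        const std::string collapsed = "x 12\xD1 y";
        std::smatch m;
        assert(std::regex_search(collapsed, m, re));
        assert(m.str() == "12\xD1");
    }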
diff --git a/unicode.h b/unicode.h
index ce2bcef5a..d6a14d470 100644
--- a/unicode.h
+++ b/unicode.h
@@ -5,9 +5,9 @@
#include <vector>
#define CODEPOINT_TYPE_UNIDENTIFIED 0
-#define CODEPOINT_TYPE_DIGIT 1
+#define CODEPOINT_TYPE_NUMBER 1
#define CODEPOINT_TYPE_LETTER 2
-#define CODEPOINT_TYPE_WHITESPACE 3
+#define CODEPOINT_TYPE_SEPARATOR 3
#define CODEPOINT_TYPE_ACCENT_MARK 4
#define CODEPOINT_TYPE_PUNCTUATION 5
#define CODEPOINT_TYPE_SYMBOL 6
@@ -21,6 +21,8 @@ std::vector<uint32_t> unicode_cpts_normalize_nfd(const std::vector<uint32_t> & c
int unicode_cpt_type(uint32_t cp);
int unicode_cpt_type(const std::string & utf8);
+bool unicode_cpt_is_whitespace(uint32_t cp);
+
std::string unicode_byte_to_utf8(uint8_t byte);
uint8_t unicode_utf8_to_byte(const std::string & utf8);
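With the new header declaration in place, the whitespace predicate can be exercised directly. A minimal usage sketch, assuming unicode.cpp and unicode-data.cpp are compiled into the same binary:

    #include <cstdio>
    #include "unicode.h"

    int main() {
        printf("%d\n", (int) unicode_cpt_is_whitespace(0x0020)); // 1: SPACE
        printf("%d\n", (int) unicode_cpt_is_whitespace(0x3000)); // 1: IDEOGRAPHIC SPACE
        printf("%d\n", (int) unicode_cpt_is_whitespace('a'));    // 0: not whitespace
    }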