diff --git a/.devops/lamma-cpp-clblast.srpm.spec b/.devops/lamma-cpp-clblast.srpm.spec
new file mode 100644
index 000000000..739c68281
--- /dev/null
+++ b/.devops/lamma-cpp-clblast.srpm.spec
@@ -0,0 +1,58 @@
+# SRPM for building from source and packaging an RPM for RPM-based distros.
+# https://fedoraproject.org/wiki/How_to_create_an_RPM_package
+# Built and maintained by John Boero - boeroboy@gmail.com
+# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal
+
+# Notes for llama.cpp:
+# 1. Tags are currently based on hash - which will not sort asciibetically.
+# We need to declare standard versioning if people want to sort latest releases.
+# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
+# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc installed.
+# Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
+# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
+# It is up to the user to install the correct vendor-specific support.
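+#
+# Example build (illustrative only; assumes rpmdevtools is installed and the default ~/rpmbuild tree is used):
+#   spectool -g -R lamma-cpp-clblast.srpm.spec   # download Source0 into ~/rpmbuild/SOURCES
+#   rpmbuild -ba lamma-cpp-clblast.srpm.spec     # build the binary and source RPMs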
+
+Name: llama.cpp-clblast
+Version: master
+Release: 1%{?dist}
+Summary: OpenCL Inference of LLaMA model in pure C/C++
+License: MIT
+Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
+BuildRequires: coreutils make gcc-c++ git mesa-libOpenCL-devel
+URL: https://github.com/ggerganov/llama.cpp
+
+%define debug_package %{nil}
+%define source_date_epoch_from_changelog 0
+
+%description
+OpenCL inference for Meta's LLaMA 2 models using default options.
+
+%prep
+%setup -n llama.cpp-master
+
+%build
+make -j LLAMA_CLBLAST=1
+
+%install
+mkdir -p %{buildroot}%{_bindir}/
+cp -p main %{buildroot}%{_bindir}/llamacppclblast
+cp -p server %{buildroot}%{_bindir}/llamacppclblastserver
+cp -p simple %{buildroot}%{_bindir}/llamacppclblastsimple
+
+%clean
+rm -rf %{buildroot}
+rm -rf %{_builddir}/*
+
+%files
+%{_bindir}/llamacppclblast
+%{_bindir}/llamacppclblastserver
+%{_bindir}/llamacppclblastsimple
+
+%pre
+
+%post
+
+%preun
+%postun
+
+%changelog
diff --git a/.devops/lamma-cpp-cublas.srpm.spec b/.devops/lamma-cpp-cublas.srpm.spec
new file mode 100644
index 000000000..75d32fbe7
--- /dev/null
+++ b/.devops/lamma-cpp-cublas.srpm.spec
@@ -0,0 +1,59 @@
+# SRPM for building from source and packaging an RPM for RPM-based distros.
+# https://fedoraproject.org/wiki/How_to_create_an_RPM_package
+# Built and maintained by John Boero - boeroboy@gmail.com
+# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal
+
+# Notes for llama.cpp:
+# 1. Tags are currently based on hash - which will not sort asciibetically.
+# We need to declare standard versioning if people want to sort latest releases.
+# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
+# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc installed.
+# Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
+# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
+# It is up to the user to install the correct vendor-specific support.
+
+Name: llama.cpp-cublas
+Version: master
+Release: 1%{?dist}
+Summary:        CUDA Inference of LLaMA model in pure C/C++ (cuBLAS)
+License: MIT
+Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
+BuildRequires: coreutils make gcc-c++ git cuda-toolkit
+Requires: cuda-toolkit
+URL: https://github.com/ggerganov/llama.cpp
+
+%define debug_package %{nil}
+%define source_date_epoch_from_changelog 0
+
+%description
+CUDA inference for Meta's LLaMA 2 models using default options.
+
+%prep
+%setup -n llama.cpp-master
+
+%build
+make -j LLAMA_CUBLAS=1
+
+%install
+mkdir -p %{buildroot}%{_bindir}/
+cp -p main %{buildroot}%{_bindir}/llamacppcublas
+cp -p server %{buildroot}%{_bindir}/llamacppcublasserver
+cp -p simple %{buildroot}%{_bindir}/llamacppcublassimple
+
+%clean
+rm -rf %{buildroot}
+rm -rf %{_builddir}/*
+
+%files
+%{_bindir}/llamacppcublas
+%{_bindir}/llamacppcublasserver
+%{_bindir}/llamacppcublassimple
+
+%pre
+
+%post
+
+%preun
+%postun
+
+%changelog
diff --git a/.devops/llama-cpp.srpm.spec b/.devops/llama-cpp.srpm.spec
new file mode 100644
index 000000000..c65251a5a
--- /dev/null
+++ b/.devops/llama-cpp.srpm.spec
@@ -0,0 +1,58 @@
+# SRPM for building from source and packaging an RPM for RPM-based distros.
+# https://fedoraproject.org/wiki/How_to_create_an_RPM_package
+# Built and maintained by John Boero - boeroboy@gmail.com
+# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal
+
+# Notes for llama.cpp:
+# 1. Tags are currently based on hash - which will not sort asciibetically.
+# We need to declare standard versioning if people want to sort latest releases.
+# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
+# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc installed.
+# Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
+# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
+# It is up to the user to install the correct vendor-specific support.
+
+Name: llama.cpp
+Version: master
+Release: 1%{?dist}
+Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
+License: MIT
+Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
+BuildRequires: coreutils make gcc-c++ git
+URL: https://github.com/ggerganov/llama.cpp
+
+%define debug_package %{nil}
+%define source_date_epoch_from_changelog 0
+
+%description
+CPU inference for Meta's LLaMA 2 models using default options.
+
+%prep
+%autosetup
+
+%build
+make -j
+
+%install
+mkdir -p %{buildroot}%{_bindir}/
+cp -p main %{buildroot}%{_bindir}/llamacpp
+cp -p server %{buildroot}%{_bindir}/llamacppserver
+cp -p simple %{buildroot}%{_bindir}/llamacppsimple
+
+%clean
+rm -rf %{buildroot}
+rm -rf %{_builddir}/*
+
+%files
+%{_bindir}/llamacpp
+%{_bindir}/llamacppserver
+%{_bindir}/llamacppsimple
+
+%pre
+
+%post
+
+%preun
+%postun
+
+%changelog
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 84faad37a..10320ad1f 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -291,24 +291,32 @@ jobs:
cd build
ctest -C Release --verbose --timeout 900
- - name: Get commit hash
- id: commit
- if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
- uses: pr-mpt/actions-commit-hash@v2
+ - name: Determine tag name
+ id: tag
+ shell: bash
+ run: |
+ BUILD_NUMBER="$(git rev-list --count HEAD)"
+ SHORT_HASH="$(git rev-parse --short=7 HEAD)"
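+          # e.g. a push to master produces "b1042", while a branch "feature/x" produces "feature-x-b1042-cf658ad" (illustrative values)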
+ if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+ echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+ else
+ SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+ echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+ fi
- name: Pack artifacts
id: pack_artifacts
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
run: |
Copy-Item LICENSE .\build\bin\Release\llama.cpp.txt
- 7z a llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-x64.zip .\build\bin\Release\*
+ 7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip .\build\bin\Release\*
- name: Upload artifacts
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: actions/upload-artifact@v3
with:
path: |
- llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-x64.zip
+ llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip
windows-latest-cmake-cublas:
runs-on: windows-latest
@@ -338,23 +346,31 @@ jobs:
cmake .. -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUBLAS=ON
cmake --build . --config Release
- - name: Get commit hash
- id: commit
- if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
- uses: pr-mpt/actions-commit-hash@v2
+ - name: Determine tag name
+ id: tag
+ shell: bash
+ run: |
+ BUILD_NUMBER="$(git rev-list --count HEAD)"
+ SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+ if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+ echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+ else
+ SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+ echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+ fi
- name: Pack artifacts
id: pack_artifacts
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
run: |
- 7z a llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip .\build\bin\Release\*
+ 7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip .\build\bin\Release\*
- name: Upload artifacts
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: actions/upload-artifact@v3
with:
path: |
- llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip
+ llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip
- name: Copy and pack Cuda runtime
if: ${{ matrix.cuda == '12.1.0' }}
@@ -400,21 +416,34 @@ jobs:
- windows-latest-cmake-cublas
steps:
+ - name: Clone
+ id: checkout
+ uses: actions/checkout@v1
+
+ - name: Determine tag name
+ id: tag
+ shell: bash
+ run: |
+ BUILD_NUMBER="$(git rev-list --count HEAD)"
+ SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+ if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+ echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+ else
+ SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+ echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+ fi
+
- name: Download artifacts
id: download-artifact
uses: actions/download-artifact@v3
- - name: Get commit hash
- id: commit
- uses: pr-mpt/actions-commit-hash@v2
-
- name: Create release
id: create_release
uses: anzz1/action-create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
- tag_name: ${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}
+ tag_name: ${{ steps.tag.outputs.name }}
- name: Upload release
id: upload_release
diff --git a/.gitignore b/.gitignore
index 5483de6ba..7e0a239ce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -84,4 +84,3 @@ tests/test-quantize-fns
tests/test-quantize-perf
tests/test-sampling
tests/test-tokenizer-0
-
diff --git a/README.md b/README.md
index 82e070ac3..eebb11392 100644
--- a/README.md
+++ b/README.md
@@ -11,15 +11,17 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
### Hot topics
-A new file format has been introduced: [GGUF](https://github.com/ggerganov/llama.cpp/pull/2398)
+- Added support for Falcon models: https://github.com/ggerganov/llama.cpp/pull/2717
-Last revision compatible with the old format: [dadbed9](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa)
+- A new file format has been introduced: [GGUF](https://github.com/ggerganov/llama.cpp/pull/2398)
-### Current `master` should be considered in Beta - expect some issues for a few days!
+ Last revision compatible with the old format: [dadbed9](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa)
-### Be prepared to re-convert and / or re-quantize your GGUF models while this notice is up!
+ ### Current `master` should be considered in Beta - expect some issues for a few days!
-### Issues with non-GGUF models will be considered with low priority!
+ ### Be prepared to re-convert and / or re-quantize your GGUF models while this notice is up!
+
+ ### Issues with non-GGUF models will be considered with low priority!
----
@@ -39,6 +41,7 @@ Last revision compatible with the old format: [dadbed9](https://github.com/ggerg
Memory/Disk Requirements
Quantization
Interactive mode
+ Constrained output with grammars
Instruction mode with Alpaca
Using OpenLLaMA
Using GPT4All
@@ -65,12 +68,11 @@ The main goal of `llama.cpp` is to run the LLaMA model using 4-bit integer quant
- Apple silicon first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks
- AVX, AVX2 and AVX512 support for x86 architectures
- Mixed F16 / F32 precision
-- 4-bit, 5-bit and 8-bit integer quantization support
-- Supports OpenBLAS/Apple BLAS/ARM Performance Lib/ATLAS/BLIS/Intel MKL/NVHPC/ACML/SCSL/SGIMATH and [more](https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors) in BLAS
-- cuBLAS and CLBlast support
+- 2-bit, 3-bit, 4-bit, 5-bit, 6-bit and 8-bit integer quantization support
+- CUDA, Metal and OpenCL GPU backend support
The original implementation of `llama.cpp` was [hacked in an evening](https://github.com/ggerganov/llama.cpp/issues/33#issuecomment-1465108022).
-Since then, the project has improved significantly thanks to many contributions. This project is for educational purposes and serves
+Since then, the project has improved significantly thanks to many contributions. This project is mainly for educational purposes and serves
as the main playground for developing new features for the [ggml](https://github.com/ggerganov/ggml) library.
**Supported platforms:**
@@ -84,6 +86,7 @@ as the main playground for developing new features for the [ggml](https://github
- [X] LLaMA 🦙
- [x] LLaMA 2 🦙🦙
+- [X] Falcon
- [X] [Alpaca](https://github.com/ggerganov/llama.cpp#instruction-mode-with-alpaca)
- [X] [GPT4All](https://github.com/ggerganov/llama.cpp#using-gpt4all)
- [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) and [Chinese LLaMA-2 / Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2)
@@ -114,90 +117,84 @@ as the main playground for developing new features for the [ggml](https://github
---
-Here is a typical run using LLaMA-7B:
+Here is a typical run using LLaMA v2 13B on M2 Ultra:
```java
-make -j && ./main -m ./models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
+$ make -j && ./main -m models/llama-13b-v2/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e
I llama.cpp build info:
I UNAME_S: Darwin
I UNAME_P: arm
I UNAME_M: arm64
-I CFLAGS: -I. -O3 -DNDEBUG -std=c11 -fPIC -pthread -DGGML_USE_ACCELERATE
-I CXXFLAGS: -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC -pthread
+I CFLAGS: -I. -O3 -std=c11 -fPIC -DNDEBUG -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -pthread -DGGML_USE_K_QUANTS -DGGML_USE_ACCELERATE
+I CXXFLAGS: -I. -I./common -O3 -std=c++11 -fPIC -DNDEBUG -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar -pthread -DGGML_USE_K_QUANTS
I LDFLAGS: -framework Accelerate
-I CC: Apple clang version 14.0.0 (clang-1400.0.29.202)
-I CXX: Apple clang version 14.0.0 (clang-1400.0.29.202)
+I CC: Apple clang version 14.0.3 (clang-1403.0.22.14.1)
+I CXX: Apple clang version 14.0.3 (clang-1403.0.22.14.1)
make: Nothing to be done for `default'.
-main: seed = 1678486056
-llama_model_load: loading model from './models/7B/ggml-model-q4_0.bin' - please wait ...
-llama_model_load: n_vocab = 32000
-llama_model_load: n_ctx = 512
-llama_model_load: n_embd = 4096
-llama_model_load: n_mult = 256
-llama_model_load: n_head = 32
-llama_model_load: n_layer = 32
-llama_model_load: n_rot = 128
-llama_model_load: f16 = 2
-llama_model_load: n_ff = 11008
-llama_model_load: ggml ctx size = 4529.34 MB
-llama_model_load: memory_size = 512.00 MB, n_mem = 16384
-llama_model_load: .................................... done
-llama_model_load: model size = 4017.27 MB / num tensors = 291
+main: build = 1041 (cf658ad)
+main: seed = 1692823051
+llama_model_loader: loaded meta data with 16 key-value pairs and 363 tensors from models/llama-13b-v2/ggml-model-q4_0.gguf (version GGUF V1 (latest))
+llama_model_loader: - type f32: 81 tensors
+llama_model_loader: - type q4_0: 281 tensors
+llama_model_loader: - type q6_K: 1 tensors
+llm_load_print_meta: format = GGUF V1 (latest)
+llm_load_print_meta: arch = llama
+llm_load_print_meta: vocab type = SPM
+llm_load_print_meta: n_vocab = 32000
+llm_load_print_meta: n_merges = 0
+llm_load_print_meta: n_ctx_train = 4096
+llm_load_print_meta: n_ctx = 512
+llm_load_print_meta: n_embd = 5120
+llm_load_print_meta: n_head = 40
+llm_load_print_meta: n_head_kv = 40
+llm_load_print_meta: n_layer = 40
+llm_load_print_meta: n_rot = 128
+llm_load_print_meta: n_gqa = 1
+llm_load_print_meta: f_norm_eps = 1.0e-05
+llm_load_print_meta: f_norm_rms_eps = 1.0e-05
+llm_load_print_meta: n_ff = 13824
+llm_load_print_meta: freq_base = 10000.0
+llm_load_print_meta: freq_scale = 1
+llm_load_print_meta: model type = 13B
+llm_load_print_meta: model ftype = mostly Q4_0
+llm_load_print_meta: model size = 13.02 B
+llm_load_print_meta: general.name = LLaMA v2
+llm_load_print_meta: BOS token = 1 '<s>'
+llm_load_print_meta: EOS token = 2 '</s>'
+llm_load_print_meta: UNK token = 0 '<unk>'
+llm_load_print_meta: LF token = 13 '<0x0A>'
+llm_load_tensors: ggml ctx size = 0.11 MB
+llm_load_tensors: mem required = 7024.01 MB (+ 400.00 MB per state)
+...................................................................................................
+llama_new_context_with_model: kv self size = 400.00 MB
+llama_new_context_with_model: compute buffer total size = 75.41 MB
-main: prompt: 'Building a website can be done in 10 simple steps:'
-main: number of tokens in prompt = 15
- 1 -> ''
- 8893 -> 'Build'
- 292 -> 'ing'
- 263 -> ' a'
- 4700 -> ' website'
- 508 -> ' can'
- 367 -> ' be'
- 2309 -> ' done'
- 297 -> ' in'
- 29871 -> ' '
- 29896 -> '1'
- 29900 -> '0'
- 2560 -> ' simple'
- 6576 -> ' steps'
- 29901 -> ':'
-
-sampling parameters: temp = 0.800000, top_k = 40, top_p = 0.950000
+system_info: n_threads = 16 / 24 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
+sampling: repeat_last_n = 64, repeat_penalty = 1.100000, presence_penalty = 0.000000, frequency_penalty = 0.000000, top_k = 40, tfs_z = 1.000000, top_p = 0.950000, typical_p = 1.000000, temp = 0.800000, mirostat = 0, mirostat_lr = 0.100000, mirostat_ent = 5.000000
+generate: n_ctx = 512, n_batch = 512, n_predict = 400, n_keep = 0
-Building a website can be done in 10 simple steps:
-1) Select a domain name and web hosting plan
-2) Complete a sitemap
-3) List your products
-4) Write product descriptions
-5) Create a user account
-6) Build the template
-7) Start building the website
-8) Advertise the website
-9) Provide email support
-10) Submit the website to search engines
-A website is a collection of web pages that are formatted with HTML. HTML is the code that defines what the website looks like and how it behaves.
-The HTML code is formatted into a template or a format. Once this is done, it is displayed on the user's browser.
-The web pages are stored in a web server. The web server is also called a host. When the website is accessed, it is retrieved from the server and displayed on the user's computer.
-A website is known as a website when it is hosted. This means that it is displayed on a host. The host is usually a web server.
-A website can be displayed on different browsers. The browsers are basically the software that renders the website on the user's screen.
-A website can also be viewed on different devices such as desktops, tablets and smartphones.
-Hence, to have a website displayed on a browser, the website must be hosted.
-A domain name is an address of a website. It is the name of the website.
-The website is known as a website when it is hosted. This means that it is displayed on a host. The host is usually a web server.
-A website can be displayed on different browsers. The browsers are basically the software that renders the website on the user’s screen.
-A website can also be viewed on different devices such as desktops, tablets and smartphones. Hence, to have a website displayed on a browser, the website must be hosted.
-A domain name is an address of a website. It is the name of the website.
-A website is an address of a website. It is a collection of web pages that are formatted with HTML. HTML is the code that defines what the website looks like and how it behaves.
-The HTML code is formatted into a template or a format. Once this is done, it is displayed on the user’s browser.
-A website is known as a website when it is hosted
-
-main: mem per token = 14434244 bytes
-main: load time = 1332.48 ms
-main: sample time = 1081.40 ms
-main: predict time = 31378.77 ms / 61.41 ms per token
-main: total time = 34036.74 ms
+ Building a website can be done in 10 simple steps:
+Step 1: Find the right website platform.
+Step 2: Choose your domain name and hosting plan.
+Step 3: Design your website layout.
+Step 4: Write your website content and add images.
+Step 5: Install security features to protect your site from hackers or spammers
+Step 6: Test your website on multiple browsers, mobile devices, operating systems etc…
+Step 7: Test it again with people who are not related to you personally – friends or family members will work just fine!
+Step 8: Start marketing and promoting the website via social media channels or paid ads
+Step 9: Analyze how many visitors have come to your site so far, what type of people visit more often than others (e.g., men vs women) etc…
+Step 10: Continue to improve upon all aspects mentioned above by following trends in web design and staying up-to-date on new technologies that can enhance user experience even further!
+How does a Website Work?
+A website works by having pages, which are made of HTML code. This code tells your computer how to display the content on each page you visit – whether it’s an image or text file (like PDFs). In order for someone else’s browser not only be able but also want those same results when accessing any given URL; some additional steps need taken by way of programming scripts that will add functionality such as making links clickable!
+The most common type is called static HTML pages because they remain unchanged over time unless modified manually (either through editing files directly or using an interface such as WordPress). They are usually served up via HTTP protocols – this means anyone can access them without having any special privileges like being part of a group who is allowed into restricted areas online; however, there may still exist some limitations depending upon where one lives geographically speaking.
+How to
+llama_print_timings: load time = 576.45 ms
+llama_print_timings: sample time = 283.10 ms / 400 runs ( 0.71 ms per token, 1412.91 tokens per second)
+llama_print_timings: prompt eval time = 599.83 ms / 19 tokens ( 31.57 ms per token, 31.68 tokens per second)
+llama_print_timings: eval time = 24513.59 ms / 399 runs ( 61.44 ms per token, 16.28 tokens per second)
+llama_print_timings: total time = 25431.49 ms
```
And here is another demo of running both LLaMA-7B and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) on a single M1 Pro MacBook:
@@ -542,6 +539,8 @@ As the models are currently fully loaded into memory, you will need adequate dis
Several quantization methods are supported. They differ in the resulting model disk size and inference speed.
+*(outdated)*
+
| Model | Measure | F16 | Q4_0 | Q4_1 | Q5_0 | Q5_1 | Q8_0 |
|------:|--------------|-------:|-------:|-------:|-------:|-------:|-------:|
| 7B | perplexity | 5.9066 | 6.1565 | 6.0912 | 5.9862 | 5.9481 | 5.9070 |
@@ -604,6 +603,16 @@ PROMPT_TEMPLATE=./prompts/chat-with-bob.txt PROMPT_CACHE_FILE=bob.prompt.bin \
CHAT_SAVE_DIR=./chat/bob ./examples/chat-persistent.sh
```
+### Constrained output with grammars
+
+`llama.cpp` supports grammars to constrain model output. For example, you can force the model to output JSON only:
+
+```bash
+./main -m ./models/13B/ggml-model-q4_0.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:'
+```
+
+The `grammars/` folder contains a handful of sample grammars. To write your own, check out the [GBNF Guide](./grammars/README.md).
+
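+As a minimal illustration (not one of the bundled grammars), a grammar that restricts the model to a bare yes/no answer could look like this:
+
+```
+root ::= "yes" | "no"
+```
+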
### Instruction mode with Alpaca
1. First, download the `ggml` Alpaca model into the `./models` folder
@@ -885,3 +894,4 @@ docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /
- [BLIS](./docs/BLIS.md)
- [Performance troubleshooting](./docs/token_generation_performance_tips.md)
- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)
+- [GBNF grammars](./grammars/README.md)
diff --git a/ci/run.sh b/ci/run.sh
old mode 100644
new mode 100755
diff --git a/common/common.cpp b/common/common.cpp
index 2a83b379e..53002ba30 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -417,6 +417,18 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
params.antiprompt.push_back(argv[i]);
} else if (arg == "--perplexity") {
params.perplexity = true;
+ } else if (arg == "--ppl-stride") {
+ if (++i >= argc) {
+ invalid_param = true;
+ break;
+ }
+ params.ppl_stride = std::stoi(argv[i]);
+ } else if (arg == "--ppl-output-type") {
+ if (++i >= argc) {
+ invalid_param = true;
+ break;
+ }
+ params.ppl_output_type = std::stoi(argv[i]);
} else if (arg == "--hellaswag") {
params.hellaswag = true;
} else if (arg == "--hellaswag-tasks") {
@@ -732,35 +744,3 @@ std::string llama_token_to_str(const struct llama_context * ctx, llama_token tok
return std::string(result.data(), result.size());
}
-
-std::vector<llama_token> llama_tokenize_bpe(
- struct llama_context * ctx,
- const std::string & text,
- bool add_bos) {
- int n_tokens = text.length() + add_bos;
-    std::vector<llama_token> result(n_tokens);
- n_tokens = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
- if (n_tokens < 0) {
- result.resize(-n_tokens);
- int check = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
- GGML_ASSERT(check == -n_tokens);
- } else {
- result.resize(n_tokens);
- }
- return result;
-}
-
-std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token) {
-    std::vector<char> result(8, 0);
- const int n_tokens = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
- if (n_tokens < 0) {
- result.resize(-n_tokens);
- const int check = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
- GGML_ASSERT(check == -n_tokens);
- } else {
- result.resize(n_tokens);
- }
-
- return std::string(result.data(), result.size());
-}
-
diff --git a/common/common.h b/common/common.h
index 18fd951ea..17d271e67 100644
--- a/common/common.h
+++ b/common/common.h
@@ -64,6 +64,10 @@ struct gpt_params {
std::string lora_adapter = ""; // lora adapter path
std::string lora_base = ""; // base model path for the lora adapter
+ int ppl_stride = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
+ int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
+ // (which is more convenient to use for plotting)
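+                                   // e.g. (illustrative): ./perplexity -m model.gguf -f wiki.test.raw --ppl-output-type 1 > ppl.dat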
+ //
bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score
@@ -116,15 +120,6 @@ std::vector<llama_token> llama_tokenize(
const std::string & text,
bool add_bos);
-std::vector<llama_token> llama_tokenize_bpe(
- struct llama_context * ctx,
- const std::string & text,
- bool add_bos);
-
std::string llama_token_to_str(
const struct llama_context * ctx,
llama_token token);
-
-std::string llama_token_to_str_bpe(
- const struct llama_context * ctx,
- llama_token token);
diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
old mode 100644
new mode 100755
index b3e190a0f..411cbf682
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# HF falcon--> gguf conversion
import gguf
@@ -94,21 +95,27 @@ print("gguf: get model metadata")
block_count = hparams["n_layer"]
-gguf_writer.add_name(last_dir)
+gguf_writer.add_name("Falcon")
gguf_writer.add_context_length(2048) # not in config.json
gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform
gguf_writer.add_embedding_length(hparams["hidden_size"])
gguf_writer.add_feed_forward_length(4 * hparams["hidden_size"])
gguf_writer.add_block_count(block_count)
gguf_writer.add_head_count(hparams["n_head"])
-if "n_head_kv" in hparams: gguf_writer.add_head_count_kv(hparams["n_head_kv"])
+if "n_head_kv" in hparams:
+ gguf_writer.add_head_count_kv(hparams["n_head_kv"])
+else:
+ gguf_writer.add_head_count_kv(1)
gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"])
+gguf_writer.add_file_type(ftype)
# TOKENIZATION
print("gguf: get tokenizer metadata")
tokens: List[str] = []
+scores: List[float] = []
+toktypes: List[int] = []
merges: List[str] = []
@@ -152,41 +159,30 @@ if Path(dir_model + "/tokenizer.json").is_file():
text = bytearray(pad_token)
tokens.append(text)
+        scores.append(0.0) # dummy
+ toktypes.append(gguf.TokenType.NORMAL) # dummy
gguf_writer.add_token_list(tokens)
+ gguf_writer.add_token_scores(scores)
+ gguf_writer.add_token_types(toktypes)
- if "added_tokens" in tokenizer_json and Path(dir_model + "/tokenizer_config.json").is_file():
- print("gguf: get special token ids")
+print("gguf: get special token ids")
+# Look for special tokens in config.json
- with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
- tokenizer_config = json.load(f)
+if "bos_token_id" in hparams and hparams["bos_token_id"] != None:
+ gguf_writer.add_bos_token_id(hparams["bos_token_id"])
- # find special token ids
+if "eos_token_id" in hparams and hparams["eos_token_id"] != None:
+ gguf_writer.add_eos_token_id(hparams["eos_token_id"])
- if "bos_token" in tokenizer_config:
- for key in tokenizer_json["added_tokens"]:
- if key["content"] == tokenizer_config["bos_token"]:
- gguf_writer.add_bos_token_id(key["id"])
+if "unk_token_id" in hparams and hparams["unk_token_id"] != None:
+ gguf_writer.add_unk_token_id(hparams["unk_token_id"])
- if "eos_token" in tokenizer_config:
- for key in tokenizer_json["added_tokens"]:
- if key["content"] == tokenizer_config["eos_token"]:
- gguf_writer.add_eos_token_id(key["id"])
+if "sep_token_id" in hparams and hparams["sep_token_id"] != None:
+ gguf_writer.add_sep_token_id(hparams["sep_token_id"])
- if "unk_token" in tokenizer_config:
- for key in tokenizer_json["added_tokens"]:
- if key["content"] == tokenizer_config["unk_token"]:
- gguf_writer.add_unk_token_id(key["id"])
-
- if "sep_token" in tokenizer_config:
- for key in tokenizer_json["added_tokens"]:
- if key["content"] == tokenizer_config["sep_token"]:
- gguf_writer.add_sep_token_id(key["id"])
-
- if "pad_token" in tokenizer_config:
- for key in tokenizer_json["added_tokens"]:
- if key["content"] == tokenizer_config["pad_token"]:
- gguf_writer.add_pad_token_id(key["id"])
+if "pad_token_id" in hparams and hparams["pad_token_id"] != None:
+ gguf_writer.add_pad_token_id(hparams["pad_token_id"])
# TENSORS
@@ -194,8 +190,9 @@ if Path(dir_model + "/tokenizer.json").is_file():
tensor_map = gguf.get_tensor_name_map(ARCH,block_count)
# params for qkv transform
-n_head = hparams["n_head"]
+n_head = hparams["n_head"]
n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1
+
head_dim = hparams["hidden_size"] // n_head
# tensor info
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
old mode 100644
new mode 100755
index a7cefc6f3..6eeff5bb1
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# HF gptneox--> gguf conversion
import gguf
diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py
old mode 100644
new mode 100755
index ab5c80b69..f103f5f61
--- a/convert-llama-7b-pth-to-gguf.py
+++ b/convert-llama-7b-pth-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# 7b pth llama --> gguf conversion
# Only models with a single datafile are supported, like 7B
# HF files required in the model dir: config.json tokenizer_config.json tokenizer.json tokenizer.model
diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py
old mode 100644
new mode 100755
index fa4a044ca..3bf93627d
--- a/convert-llama-ggmlv3-to-gguf.py
+++ b/convert-llama-ggmlv3-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import sys, struct, math, argparse
from pathlib import Path
@@ -93,7 +94,7 @@ class Tensor:
pad = ((offset + 31) & ~31) - offset
offset += pad
n_elems = np.prod(self.dims)
- n_bytes = (n_elems * tysize) // blksize
+ n_bytes = np.int64(np.int64(n_elems) * np.int64(tysize)) // np.int64(blksize)
self.start_offset = offset
self.len_bytes = n_bytes
offset += n_bytes
@@ -215,15 +216,10 @@ class GGMLToGGUF:
if self.vocab_override is not None:
vo = self.vocab_override
print('* Adding vocab item(s)')
- for (idx, vitem) in enumerate(vo.all_tokens()):
- if len(vitem) == 3:
- tokens.append(vitem[0])
- scores.append(vitem[1])
- toktypes.append(vitem[2])
- else:
- # Maybe try to guess the token type here?
- tokens.append(vitem[0])
- scores.append(vitem[1])
+ for (idx, (vbytes, score, ttype)) in enumerate(vo.all_tokens()):
+ tokens.append(vbytes)
+ scores.append(score)
+ toktypes.append(ttype)
assert len(tokens) == hp.n_vocab, f'Override vocab has a different number of items than hyperparameters - override = {len(tokens)} but n_vocab={hp.n_vocab}'
gguf_writer.add_token_list(tokens)
gguf_writer.add_token_scores(scores)
@@ -231,9 +227,21 @@ class GGMLToGGUF:
gguf_writer.add_token_types(toktypes)
return
print(f'* Adding {hp.n_vocab} vocab item(s)')
+ assert len(self.model.vocab.items) >= 3, 'Cannot handle unexpectedly short model vocab'
for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items):
tt = 1 # Normal
- if len(vbytes) == 0:
+ # Special handling for UNK, BOS, EOS tokens.
+ if tokid <= 2:
+ if tokid == 0:
+                vbytes = b'<unk>'
+ tt = 2
+ elif tokid == 1:
+                vbytes = b'<s>'
+ tt = 3
+ else:
+                vbytes = b'</s>'
+ tt = 3
+ elif len(vbytes) == 0:
tt = 3 # Control
elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1:
vbytes = bytes(f'<0x{vbytes[0]:02X}>', encoding = 'UTF-8')
@@ -246,6 +254,9 @@ class GGMLToGGUF:
gguf_writer.add_token_list(tokens)
gguf_writer.add_token_scores(scores)
gguf_writer.add_token_types(toktypes)
+ gguf_writer.add_unk_token_id(0)
+ gguf_writer.add_bos_token_id(1)
+ gguf_writer.add_eos_token_id(2)
def add_tensors(self, gguf_writer):
nm = self.name_map
@@ -330,4 +341,5 @@ def main():
converter.save()
print(f'* Successful completion. Output saved to: {cfg.output}')
-main()
+if __name__ == '__main__':
+ main()
diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py
old mode 100644
new mode 100755
index f8cfdaa80..08fde238b
--- a/convert-llama-hf-to-gguf.py
+++ b/convert-llama-hf-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# HF llama --> gguf conversion
import gguf
diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index b4999ff5a..a94a7d0af 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import json
import os
import re
@@ -6,23 +6,22 @@ import struct
import sys
from typing import Any, Dict, Sequence, TextIO
+import numpy as np
import torch
-from convert import DATA_TYPE_TO_FTYPE, NUMPY_TYPE_TO_DATA_TYPE, DataType
+NUMPY_TYPE_TO_FTYPE: Dict[str, int] = {"float32": 0, "float16": 1}
+
HF_SUBLAYER_TO_GGML = {
- "self_attn.q_proj": "attention.wq",
- "self_attn.k_proj": "attention.wk",
- "self_attn.v_proj": "attention.wv",
- "self_attn.o_proj": "attention.wo",
- "mlp.gate_proj": "feed_forward.w1",
- "mlp.down_proj": "feed_forward.w2",
- "mlp.up_proj": "feed_forward.w3",
- "input_layernorm": "attention_norm",
+ "self_attn.q_proj": "attn_q",
+ "self_attn.k_proj": "attn_k",
+ "self_attn.v_proj": "attn_v",
+ "self_attn.o_proj": "attn_output",
+ "mlp.gate_proj": "ffn_gate",
+ "mlp.down_proj": "ffn_down",
+ "mlp.up_proj": "ffn_up",
+ "input_layernorm": "attn_norm",
"post_attention_layernorm": "ffn_norm",
- # "norm": "norm",
- # "embed_tokens": "tok_embeddings",
- # "lm_head": "output",
}
@@ -39,7 +38,7 @@ def translate_tensor_name(t: str) -> str:
sys.exit(1)
output_string = (
- f"layers.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.weight.lora{lora_type}"
+ f"blk.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.weight.lora{lora_type}"
)
return output_string
else:
@@ -54,12 +53,14 @@ def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None:
# https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int
# but some models ship a float value instead
# let's convert to int, but fail if lossless conversion is not possible
- assert int(params["lora_alpha"]) == params["lora_alpha"], "cannot convert float to int losslessly"
+ assert (
+ int(params["lora_alpha"]) == params["lora_alpha"]
+ ), "cannot convert float to int losslessly"
fout.write(struct.pack("i", int(params["lora_alpha"])))
def write_tensor_header(
- self, name: str, shape: Sequence[int], data_type: DataType
+ self, name: str, shape: Sequence[int], data_type: np.dtype
) -> None:
sname = name.encode("utf-8")
fout.write(
@@ -67,7 +68,7 @@ def write_tensor_header(
"iii",
len(shape),
len(sname),
- DATA_TYPE_TO_FTYPE[NUMPY_TYPE_TO_DATA_TYPE[data_type]],
+ NUMPY_TYPE_TO_FTYPE[data_type.name],
)
)
fout.write(struct.pack("i" * len(shape), *shape[::-1]))
diff --git a/convert.py b/convert.py
old mode 100644
new mode 100755
index e720889fd..b7c626d84
--- a/convert.py
+++ b/convert.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import gguf
import argparse
@@ -106,6 +106,9 @@ class Params:
ftype: Optional[GGMLFileType] = None
+ # path to the directory containing the model files
+ path_model: Optional['Path'] = None
+
@staticmethod
def find_n_mult(n_ff: int, n_embd: int) -> int:
# hardcoded magic range
@@ -191,7 +194,7 @@ class Params:
def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
config = json.load(open(config_path))
- n_vocab = config["vocab_size"]
+ n_vocab = config["vocab_size"] if "vocab_size" in config else -1
n_embd = config["dim"]
n_layer = config["n_layers"]
n_mult = config["multiple_of"]
@@ -231,6 +234,8 @@ class Params:
else:
params = Params.guessed(model_plus.model)
+ params.path_model = model_plus.paths[0].parent
+
return params
@@ -733,7 +738,13 @@ class OutputFile:
self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])
def add_meta_arch(self, params: Params) -> None:
- self.gguf.add_name ("LLaMA")
+ name = "LLaMA"
+ if (params.n_ctx == 4096):
+ name = "LLaMA v2"
+ if params.path_model:
+ name = str(params.path_model.parent).split('/')[-1]
+
+ self.gguf.add_name (name)
self.gguf.add_context_length (params.n_ctx)
self.gguf.add_embedding_length (params.n_embd)
self.gguf.add_block_count (params.n_layer)
diff --git a/examples/convert-llama2c-to-ggml/README.md b/examples/convert-llama2c-to-ggml/README.md
index 868f57d6d..fd561fcbc 100644
--- a/examples/convert-llama2c-to-ggml/README.md
+++ b/examples/convert-llama2c-to-ggml/README.md
@@ -12,15 +12,19 @@ usage: ./convert-llama2c-to-ggml [options]
options:
-h, --help show this help message and exit
- --copy-vocab-from-model FNAME model path from which to copy vocab (default 'models/ggml-vocab.bin')
+ --copy-vocab-from-model FNAME model path from which to copy vocab (default 'tokenizer.bin')
--llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model
--llama2c-output-model FNAME model path to save the converted llama2.c model (default ak_llama_model.bin')
```
-An example command is as follows:
+An example command using a model from [karpathy/tinyllamas](https://huggingface.co/karpathy/tinyllamas) is as follows:
-`$ ./convert-llama2c-to-ggml --copy-vocab-from-model <ggml-vocab.bin> --llama2c-model <llama2.c model path> --llama2c-output-model <ggml output model path>`
+`$ ./convert-llama2c-to-ggml --copy-vocab-from-model ../llama2.c/tokenizer.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.ggmlv3.bin`
-Now you can use the model with command like:
+For now the generated model is in the legacy GGJTv3 format, so you need to convert it to gguf manually:
-`$ ./main -m <ggml output model path> -p "One day, Lily met a Shoggoth" -n 500 -c 256 -eps 1e-5`
+`$ python ./convert-llama-ggmlv3-to-gguf.py --eps 1e-5 --input stories42M.ggmlv3.bin --output stories42M.gguf.bin`
+
+Now you can use the model with a command like:
+
+`$ ./main -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256`
diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index 469d6e3de..f8a58dc7a 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -17,6 +17,9 @@
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
+#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
+#define LLAMA_FILE_VERSION_GGJT_V3 3
+
//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc.
typedef struct {
int dim; // transformer dimension
@@ -49,10 +52,10 @@ typedef struct {
// float* freq_cis_real; // (seq_len, dim/2)
// float* freq_cis_imag; // (seq_len, dim/2)
// (optional) classifier weights for the logits, on the last layer
- //float* wcls;
+ float* wcls;
} TransformerWeights;
-void malloc_weights(TransformerWeights* w, Config* p) {
+void malloc_weights(TransformerWeights* w, Config* p, bool shared_weights) {
// we calloc instead of malloc to keep valgrind happy
w->token_embedding_table = new float[p->vocab_size * p->dim]();
printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->token_embedding_table\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
@@ -86,9 +89,16 @@ void malloc_weights(TransformerWeights* w, Config* p) {
w->rms_final_weight = new float[p->dim]();
printf("[%s:AK] Allocating [%d] float space for w->rms_final_weight\n",__func__,p->dim);
+
+ if (shared_weights) {
+ w->wcls = NULL;
+ } else {
+ w->wcls = new float[p->vocab_size * p->dim]();
+ printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
+ }
}
-int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f) {
+int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f, bool shared_weights) {
    if (fread(w->token_embedding_table, sizeof(float), p->vocab_size * p->dim, f) != static_cast<size_t>(p->vocab_size * p->dim)) return 1;
    if (fread(w->rms_att_weight, sizeof(float), p->n_layers * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim)) return 1;
    if (fread(w->wq, sizeof(float), p->n_layers * p->dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->dim)) return 1;
@@ -100,6 +110,22 @@ int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f) {
    if (fread(w->w2, sizeof(float), p->n_layers * p->hidden_dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->hidden_dim * p->dim)) return 1;
    if (fread(w->w3, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->hidden_dim)) return 1;
    if (fread(w->rms_final_weight, sizeof(float), p->dim, f) != static_cast<size_t>(p->dim)) return 1;
+
+ // Skip freq_cis_real & freq_cis_imag
+ int head_size = p->dim / p->n_heads;
+ fseek(f, p->seq_len * head_size * sizeof(float), SEEK_CUR);
+
+    if (!shared_weights && fread(w->wcls, sizeof(float), p->vocab_size * p->dim, f) != static_cast<size_t>(p->vocab_size * p->dim)) return 1;
+
+ // Check we didn't forget to read anything
+ auto curr = ftell(f);
+ fseek(f, 0, SEEK_END);
+ auto end = ftell(f);
+ if (curr != end) {
+ printf("Error: failed to read the checkpoint file to the end (curr = %ld, end = %ld)\n", curr, end);
+ return 1;
+ }
+
return 0;
}
@@ -115,6 +141,7 @@ void free_weights(TransformerWeights* w) {
delete w->w2;
delete w->w3;
delete w->rms_final_weight;
+ if (w->wcls) delete w->wcls;
}
void print_sample_weights(TransformerWeights *w){
@@ -131,6 +158,7 @@ void print_sample_weights(TransformerWeights *w){
printf("%f\n", w->w2[0]);
printf("%f\n", w->w3[0]);
printf("%f\n", w->rms_att_weight[0]);
+ if (w->wcls) printf("%f\n", w->wcls[0]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -509,26 +537,28 @@ bool is_ggml_file(const char *filename) {
}
void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab) {
- // heuristic to infer whether vocab is from ggml or from llama2.c vocabulary
- if (is_ggml_file(filename)) {
-
- struct llama_context_params llama_params = llama_context_default_params();
- llama_params.vocab_only = true;
-
- struct llama_model * lmodel = llama_load_model_from_file(filename, llama_params);
- struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_params);
-
- const int n_vocab = llama_n_vocab(lctx);
- vocab->id_to_token.resize(n_vocab);
-        for (int i=0; i<n_vocab; i++) {
-            vocab->id_to_token[i].text = llama_token_get_text(lctx, i);
- vocab->id_to_token[i].score = llama_token_get_score(lctx, i);
- vocab->id_to_token[i].type = llama_token_get_type(lctx, i);
- vocab->token_to_id.emplace(vocab->id_to_token[i].text, i);
- }
- llama_free(lctx);
- llama_free_model(lmodel);
- } else { // assume llama2.c vocabulary
+#pragma message("TODO: implement reading vocabulary using gguf")
+// // heuristic to infer whether vocab is from ggml or from llama2.c vocabulary
+// if (is_ggml_file(filename)) {
+//
+// struct llama_context_params llama_params = llama_context_default_params();
+// llama_params.vocab_only = true;
+//
+// struct llama_model * lmodel = llama_load_model_from_file(filename, llama_params);
+// struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_params);
+//
+// const int n_vocab = llama_n_vocab(lctx);
+// vocab->id_to_token.resize(n_vocab);
+//     for (int i=0; i<n_vocab; i++) {
+//         vocab->id_to_token[i].text = llama_token_get_text(lctx, i);
+// vocab->id_to_token[i].score = llama_token_get_score(lctx, i);
+// vocab->id_to_token[i].type = llama_token_get_type(lctx, i);
+// vocab->token_to_id.emplace(vocab->id_to_token[i].text, i);
+// }
+// llama_free(lctx);
+// llama_free_model(lmodel);
+// } else
+ { // assume llama2.c vocabulary
printf("Assuming llama2.c vocabulary since %s is not a ggml file\n", filename);
llama_file file(filename, "rb");
const int n_vocab = config->vocab_size;
@@ -538,6 +568,12 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
float_t score = file.read_f32();
uint32_t len = file.read_u32();
std::string text = file.read_string(len);
+ // Special-case handling of <0xXX> single byte tokens.
+ char byte_val;
+ if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) {
+ char cstr[2] = { byte_val, 0 };
+ text = cstr;
+ }
vocab->id_to_token[i].text = text;
vocab->id_to_token[i].score = score;
vocab->id_to_token[i].type = LLAMA_TOKEN_TYPE_UNDEFINED;
@@ -589,83 +625,80 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
}
#pragma message("TODO: implement file saving using gguf")
- (void) vocab;
- (void) model;
- (void) w;
-// // write_magic
-// file.write_u32(LLAMA_FILE_MAGIC); // magic
-// file.write_u32(LLAMA_FILE_VERSION); // version
-// // write_hparams
-// file.write_u32(model->hparams.n_vocab);
-// file.write_u32(model->hparams.n_embd);
-// file.write_u32(model->hparams.n_mult);
-// file.write_u32(model->hparams.n_head);
-// file.write_u32(model->hparams.n_layer);
-// file.write_u32(model->hparams.n_rot);
-// file.write_u32(LLAMA_FTYPE_ALL_F32);
-//
-// // write_vocab - for now we are just writing the existing BPE voc. assuming karpathy's vocabulary is the same. idk.
-// uint32_t n_vocab = model->hparams.n_vocab;
-// for (uint32_t i = 0; i < n_vocab; i++) {
-// const auto & token_data = vocab->id_to_token.at(i);
-// file.write_u32((uint32_t) token_data.tok.size());
-// file.write_raw(token_data.tok.data(), token_data.tok.size());
-// file.write_raw(&token_data.score, sizeof(token_data.score));
-// }
-//
-// // stuff AK weights into GG weights one by one.
-// // w->token_embedding_table -> model->tok_embeddings
-// // float* -> struct ggml_tensor
-// stuff_karpathy_weights_into_gg(model->tok_embeddings, w->token_embedding_table);
-// stuff_karpathy_weights_into_gg(model->output, w->token_embedding_table);
-//
-// stuff_karpathy_weights_into_gg(model->norm, w->rms_final_weight);
-// //print_row(model->norm, 0);
-//
-// // for rms-att-weight
-// int row_length = model->hparams.n_embd;
-// const auto & hparams = model->hparams;
-// //int n_ff = model->hparams.n_embd;
-// int n_ff = get_n_ff(&hparams);
-//
-// for (uint32_t i = 0; i < model->hparams.n_layer; ++i){
-// auto & layer = model->layers[i];
-// // 1d
-// stuff_karpathy_weights_into_gg(layer.attention_norm, &w->rms_att_weight[i*row_length]);
-// stuff_karpathy_weights_into_gg(layer.ffn_norm , &w->rms_ffn_weight[i*row_length]);
-//
-// // from 3d matrix layer x dim x dim to 2d matrix dim x dim
-// stuff_karpathy_weights_into_gg(layer.wq , &w->wq[i*row_length*row_length]);
-// stuff_karpathy_weights_into_gg(layer.wk , &w->wk[i*row_length*row_length]);
-// stuff_karpathy_weights_into_gg(layer.wv , &w->wv[i*row_length*row_length]);
-// stuff_karpathy_weights_into_gg(layer.wo , &w->wo[i*row_length*row_length]);
-//
-// stuff_karpathy_weights_into_gg(layer.w1 , &w->w1[i*row_length*n_ff]);
-// stuff_karpathy_weights_into_gg(layer.w2 , &w->w2[i*n_ff*row_length]);
-// stuff_karpathy_weights_into_gg(layer.w3 , &w->w3[i*row_length*n_ff]);
-// }
-// // write tensors
-// write_tensor(&file, model->tok_embeddings);
-// write_tensor(&file, model->norm);
-// write_tensor(&file, model->output); // ?
-// for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
-// auto & layer = model->layers[i];
-//
-// write_tensor(&file, layer.attention_norm);
-// write_tensor(&file, layer.wq);
-// write_tensor(&file, layer.wk);
-// write_tensor(&file, layer.wv);
-// write_tensor(&file, layer.wo);
-// write_tensor(&file, layer.ffn_norm);
-// write_tensor(&file, layer.w1);
-// write_tensor(&file, layer.w2);
-// write_tensor(&file, layer.w3);
-// }
+ // write_magic
+ file.write_u32(LLAMA_FILE_MAGIC_GGJT); // magic
+ file.write_u32(LLAMA_FILE_VERSION_GGJT_V3); // version
+ // write_hparams
+ file.write_u32(model->hparams.n_vocab);
+ file.write_u32(model->hparams.n_embd);
+ file.write_u32(model->hparams.n_mult);
+ file.write_u32(model->hparams.n_head);
+ file.write_u32(model->hparams.n_layer);
+ file.write_u32(model->hparams.n_rot);
+ file.write_u32(LLAMA_FTYPE_ALL_F32);
+
+ // write_vocab - for now we are just writing the existing BPE voc. assuming karpathy's vocabulary is the same. idk.
+ uint32_t n_vocab = model->hparams.n_vocab;
+ for (uint32_t i = 0; i < n_vocab; i++) {
+ const auto & token_data = vocab->id_to_token.at(i);
+ file.write_u32((uint32_t) token_data.text.size());
+ file.write_raw(token_data.text.data(), token_data.text.size());
+ file.write_raw(&token_data.score, sizeof(token_data.score));
+ }
+
+ // stuff AK weights into GG weights one by one.
+ // w->token_embedding_table -> model->tok_embeddings
+ // float* -> struct ggml_tensor
+ stuff_karpathy_weights_into_gg(model->tok_embeddings, w->token_embedding_table);
+ stuff_karpathy_weights_into_gg(model->output, w->wcls ? w->wcls : w->token_embedding_table);
+
+ stuff_karpathy_weights_into_gg(model->norm, w->rms_final_weight);
+ //print_row(model->norm, 0);
+
+ // for rms-att-weight
+ int row_length = model->hparams.n_embd;
+ const auto & hparams = model->hparams;
+ //int n_ff = model->hparams.n_embd;
+ int n_ff = get_n_ff(&hparams);
+
+ for (uint32_t i = 0; i < model->hparams.n_layer; ++i){
+ auto & layer = model->layers[i];
+ // 1d
+ stuff_karpathy_weights_into_gg(layer.attention_norm, &w->rms_att_weight[i*row_length]);
+ stuff_karpathy_weights_into_gg(layer.ffn_norm , &w->rms_ffn_weight[i*row_length]);
+
+ // from 3d matrix layer x dim x dim to 2d matrix dim x dim
+ stuff_karpathy_weights_into_gg(layer.wq , &w->wq[i*row_length*row_length]);
+ stuff_karpathy_weights_into_gg(layer.wk , &w->wk[i*row_length*row_length]);
+ stuff_karpathy_weights_into_gg(layer.wv , &w->wv[i*row_length*row_length]);
+ stuff_karpathy_weights_into_gg(layer.wo , &w->wo[i*row_length*row_length]);
+
+ stuff_karpathy_weights_into_gg(layer.w1 , &w->w1[i*row_length*n_ff]);
+ stuff_karpathy_weights_into_gg(layer.w2 , &w->w2[i*n_ff*row_length]);
+ stuff_karpathy_weights_into_gg(layer.w3 , &w->w3[i*row_length*n_ff]);
+ }
+ // write tensors
+ write_tensor(&file, model->tok_embeddings);
+ write_tensor(&file, model->norm);
+ write_tensor(&file, model->output); // ?
+ for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
+ auto & layer = model->layers[i];
+
+ write_tensor(&file, layer.attention_norm);
+ write_tensor(&file, layer.wq);
+ write_tensor(&file, layer.wk);
+ write_tensor(&file, layer.wv);
+ write_tensor(&file, layer.wo);
+ write_tensor(&file, layer.ffn_norm);
+ write_tensor(&file, layer.w1);
+ write_tensor(&file, layer.w2);
+ write_tensor(&file, layer.w3);
+ }
}
struct train_params get_default_train_params() {
struct train_params params;
- params.fn_vocab_model = "models/ggml-vocab.bin";
+ params.fn_vocab_model = "tokenizer.bin";
params.fn_llama2c_output_model = "ak_llama_model.bin";
params.fn_train_data = "shakespeare.txt";
params.fn_checkpoint_in = "checkpoint.bin";
@@ -718,7 +751,7 @@ void print_usage(int /*argc*/, char ** argv, const struct train_params * params)
fprintf(stderr, "\n");
fprintf(stderr, "options:\n");
fprintf(stderr, " -h, --help show this help message and exit\n");
- fprintf(stderr, " --copy-vocab-from-model FNAME llama2.c vocabulary or ggml model path from which to copy vocab (default '%s')\n", params->fn_vocab_model);
+ fprintf(stderr, " --copy-vocab-from-model FNAME llama2.c vocabulary or ggmlv3 model path from which to copy vocab (default '%s')\n", params->fn_vocab_model);
fprintf(stderr, " --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model\n");
fprintf(stderr, " --llama2c-output-model FNAME model path to save the converted llama2.c model (default %s')\n", params->fn_llama2c_output_model);
fprintf(stderr, "\n");
@@ -791,9 +824,12 @@ int main(int argc, char ** argv) {
if (!file) { printf("Unable to open the checkpoint file %s!\n", params.fn_llama2c_model); return 1; }
// read in the config header
if(fread(&config, sizeof(Config), 1, file) != 1) { return 1; }
+ auto shared_weights = config.vocab_size > 0;
+ config.vocab_size = abs(config.vocab_size);
+
// read in the Transformer weights
- malloc_weights(&weights, &config);
- if(checkpoint_init_weights(&weights, &config, file)) { return 1; }
+ malloc_weights(&weights, &config, shared_weights);
+ if(checkpoint_init_weights(&weights, &config, file, shared_weights)) { return 1; }
fclose(file);
}
diff --git a/examples/embd-input/embd_input.py b/examples/embd-input/embd_input.py
old mode 100644
new mode 100755
index be2896614..f146acdc1
--- a/examples/embd-input/embd_input.py
+++ b/examples/embd-input/embd_input.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import ctypes
from ctypes import cdll, c_char_p, c_void_p, POINTER, c_float, c_int
import numpy as np
diff --git a/examples/embd-input/llava.py b/examples/embd-input/llava.py
old mode 100644
new mode 100755
index bcbdd2bed..06fad55f4
--- a/examples/embd-input/llava.py
+++ b/examples/embd-input/llava.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
diff --git a/examples/embd-input/minigpt4.py b/examples/embd-input/minigpt4.py
old mode 100644
new mode 100755
index 15c9b77c0..7b13e4a5c
--- a/examples/embd-input/minigpt4.py
+++ b/examples/embd-input/minigpt4.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
diff --git a/examples/embd-input/panda_gpt.py b/examples/embd-input/panda_gpt.py
old mode 100644
new mode 100755
index 0cfac5f32..891ad7cc9
--- a/examples/embd-input/panda_gpt.py
+++ b/examples/embd-input/panda_gpt.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
diff --git a/examples/jeopardy/graph.py b/examples/jeopardy/graph.py
old mode 100644
new mode 100755
index 1b6c54bff..8bc0706b8
--- a/examples/jeopardy/graph.py
+++ b/examples/jeopardy/graph.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import matplotlib.pyplot as plt
import os
import csv
diff --git a/examples/jeopardy/jeopardy.sh b/examples/jeopardy/jeopardy.sh
old mode 100644
new mode 100755
diff --git a/examples/json-schema-to-grammar.py b/examples/json-schema-to-grammar.py
old mode 100644
new mode 100755
index 2dccc118a..2a4cb65bc
--- a/examples/json-schema-to-grammar.py
+++ b/examples/json-schema-to-grammar.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import argparse
import json
import re
diff --git a/examples/main/README.md b/examples/main/README.md
index 60e3907d5..d555afdcc 100644
--- a/examples/main/README.md
+++ b/examples/main/README.md
@@ -288,6 +288,10 @@ These options help improve the performance and memory usage of the LLaMA models.
- `--prompt-cache FNAME`: Specify a file to cache the model state after the initial prompt. This can significantly speed up the startup time when you're using longer prompts. The file is created during the first run and is reused and updated in subsequent runs. **Note**: Restoring a cached prompt does not imply restoring the exact state of the session at the point it was saved. So even when specifying a specific seed, you are not guaranteed to get the same sequence of tokens as the original generation.
+### Grammars
+
+- `--grammar GRAMMAR`, `--grammar-file FILE`: Specify a grammar (defined inline or in a file) to constrain model output to a specific format. For example, you could force the model to output JSON or to speak only in emojis. See the [GBNF guide](../../grammars/README.md) for details on the syntax.
+
### Quantization
For information about 4-bit quantization, which can significantly improve performance and reduce memory usage, please refer to llama.cpp's primary [README](../../README.md#prepare-data--run).
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index ce48b30ca..3bcb551b4 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -47,7 +47,7 @@ static bool is_interacting = false;
void sigint_handler(int signo) {
if (signo == SIGINT) {
if (!is_interacting) {
- is_interacting=true;
+ is_interacting = true;
} else {
console::cleanup();
printf("\n");
@@ -220,23 +220,30 @@ int main(int argc, char ** argv) {
}
}
+ const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
+
// tokenize the prompt
std::vector<llama_token> embd_inp;
if (params.interactive_first || params.instruct || !params.prompt.empty() || session_tokens.empty()) {
- embd_inp = ::llama_tokenize(ctx, params.prompt, true);
+ embd_inp = ::llama_tokenize(ctx, params.prompt, is_spm);
} else {
embd_inp = session_tokens;
}
+ // Should not run without any tokens
+ if (embd_inp.empty()) {
+ embd_inp.push_back(llama_token_bos(ctx));
+ }
+
// Tokenize negative prompt
std::vector<llama_token> guidance_inp;
int guidance_offset = 0;
int original_prompt_len = 0;
if (ctx_guidance) {
params.cfg_negative_prompt.insert(0, 1, ' ');
- guidance_inp = ::llama_tokenize(ctx_guidance, params.cfg_negative_prompt, true);
+ guidance_inp = ::llama_tokenize(ctx_guidance, params.cfg_negative_prompt, is_spm);
- std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true);
+ std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, is_spm);
original_prompt_len = original_inp.size();
guidance_offset = (int)guidance_inp.size() - original_prompt_len;
}
@@ -283,8 +290,8 @@ int main(int argc, char ** argv) {
}
// prefix & suffix for instruct mode
- const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true);
- const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);
+ const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", is_spm);
+ const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);
// in instruct mode, we inject a prefix and a suffix to each input by the user
if (params.instruct) {
@@ -822,7 +829,8 @@ int main(int argc, char ** argv) {
}
// In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
- if (params.interactive && n_remain <= 0 && params.n_predict != -1) {
+ // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
+ if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
n_remain = params.n_predict;
is_interacting = true;
}
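
The pattern above, adding a leading BOS token only for SentencePiece (SPM) vocabularies, recurs in several examples touched by this patch. A hedged sketch of the idea, using only calls that appear in this diff (`llama_vocab_type`, `llama_token_bos`, `::llama_tokenize`); it assumes llama.h and the examples' common.h are included and is not a drop-in replacement for main.cpp:

```cpp
#include <string>
#include <vector>

static std::vector<llama_token> tokenize_prompt(llama_context * ctx, const std::string & prompt) {
    const bool is_spm  = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
    const bool add_bos = is_spm; // BPE vocabs (e.g. Falcon) do not get a BOS here

    std::vector<llama_token> inp = ::llama_tokenize(ctx, prompt, add_bos);
    if (inp.empty()) {
        inp.push_back(llama_token_bos(ctx)); // never evaluate an empty token list
    }
    return inp;
}
```
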
diff --git a/examples/make-ggml.py b/examples/make-ggml.py
old mode 100644
new mode 100755
index f63d9fc22..6a34eeac5
--- a/examples/make-ggml.py
+++ b/examples/make-ggml.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
"""
This script converts Hugging Face llama models to GGML and quantizes them.
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index f3c045aec..a7bd9db2a 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -27,12 +27,136 @@ std::vector<float> softmax(const std::vector<float>& logits) {
return probs;
}
-void perplexity(llama_context * ctx, const gpt_params & params) {
+void perplexity_v2(llama_context * ctx, const gpt_params & params) {
// Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
// Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
// Output: `perplexity: 13.5106 [114/114]`
// BOS tokens will be added for each chunk before eval
- auto tokens = ::llama_tokenize(ctx, params.prompt, true);
+
+ if (params.ppl_stride <= 0) {
+ fprintf(stderr, "%s: stride is %d but must be greater than zero!\n",__func__,params.ppl_stride);
+ return;
+ }
+
+ const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
+ const bool add_bos = is_spm;
+
+ fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
+
+ auto tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+
+ const int calc_chunk = params.n_ctx;
+
+ fprintf(stderr, "%s: have %zu tokens. Calculation chunk = %d\n", __func__, tokens.size(), calc_chunk);
+
+ if (int(tokens.size()) <= calc_chunk) {
+ fprintf(stderr, "%s: there are only %zu tokens, this is not enough for a context size of %d and stride %d\n",__func__,
+ tokens.size(), params.n_ctx, params.ppl_stride);
+ return;
+ }
+
+ const int n_chunk_max = (tokens.size() - calc_chunk + params.ppl_stride - 1) / params.ppl_stride;
+
+ const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
+ const int n_vocab = llama_n_vocab(ctx);
+ const int n_batch = params.n_batch;
+
+ int count = 0;
+ double nll = 0.0;
+
+ fprintf(stderr, "%s: calculating perplexity over %d chunks, batch_size=%d\n", __func__, n_chunk, n_batch);
+
+ for (int i = 0; i < n_chunk; ++i) {
+ const int start = i * params.ppl_stride;
+ const int end = start + calc_chunk;
+
+ const int num_batches = (calc_chunk + n_batch - 1) / n_batch;
+ //fprintf(stderr, "%s: evaluating %d...%d using %d batches\n", __func__, start, end, num_batches);
+
+ std::vector<float> logits;
+
+ const auto t_start = std::chrono::high_resolution_clock::now();
+
+ for (int j = 0; j < num_batches; ++j) {
+ const int batch_start = start + j * n_batch;
+ const int batch_size = std::min(end - batch_start, n_batch);
+
+ //fprintf(stderr, " Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
+ if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) {
+ //fprintf(stderr, "%s : failed to eval\n", __func__);
+ return;
+ }
+
+ // save original token and restore it after eval
+ const auto token_org = tokens[batch_start];
+
+ // add BOS token for the first batch of each chunk
+ if (add_bos && j == 0) {
+ tokens[batch_start] = llama_token_bos(ctx);
+ }
+
+ const auto batch_logits = llama_get_logits(ctx);
+ logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
+
+ if (j == 0) {
+ tokens[batch_start] = token_org;
+ }
+ }
+
+ const auto t_end = std::chrono::high_resolution_clock::now();
+
+ if (i == 0) {
+ const float t_total = std::chrono::duration<float>(t_end - t_start).count();
+ fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total);
+ int total_seconds = (int)(t_total * n_chunk);
+ if (total_seconds >= 60*60) {
+ fprintf(stderr, "%d hours ", total_seconds / (60*60));
+ total_seconds = total_seconds % (60*60);
+ }
+ fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
+ }
+
+ //fprintf(stderr, "%s: using tokens %d...%d\n",__func__,params.n_ctx - params.ppl_stride + start, params.n_ctx + start);
+ for (int j = params.n_ctx - params.ppl_stride - 1; j < params.n_ctx - 1; ++j) {
+
+ // Calculate probability of next token, given the previous ones.
+ const std::vector<float> tok_logits(
+ logits.begin() + (j + 0) * n_vocab,
+ logits.begin() + (j + 1) * n_vocab);
+
+ const float prob = softmax(tok_logits)[tokens[start + j + 1]];
+
+ nll += -std::log(prob);
+ ++count;
+ }
+ // perplexity is e^(average negative log-likelihood)
+ if (params.ppl_output_type == 0) {
+ printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+ } else {
+ printf("%8d %.4lf\n", i*params.ppl_stride, std::exp(nll / count));
+ }
+ fflush(stdout);
+ }
+ printf("\n");
+}
+
+void perplexity(llama_context * ctx, const gpt_params & params) {
+ if (params.ppl_stride > 0) {
+ perplexity_v2(ctx, params);
+ return;
+ }
+
+ // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
+ // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
+ // Output: `perplexity: 13.5106 [114/114]`
+ // BOS tokens will be added for each chunk before eval
+
+ const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
+ const bool add_bos = is_spm;
+
+ fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
+
+ auto tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
const int n_chunk_max = tokens.size() / params.n_ctx;
@@ -63,7 +187,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
const auto token_org = tokens[batch_start];
// add BOS token for the first batch of each chunk
- if (j == 0) {
+ if (add_bos && j == 0) {
tokens[batch_start] = llama_token_bos(ctx);
}
@@ -116,7 +240,11 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
++count;
}
// perplexity is e^(average negative log-likelihood)
- printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+ if (params.ppl_output_type == 0) {
+ printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+ } else {
+ printf("%8d %.4lf\n", i*params.n_ctx, std::exp(nll / count));
+ }
fflush(stdout);
}
printf("\n");
@@ -177,8 +305,10 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) {
size_t hs_task_count = prompt_lines.size()/6;
fprintf(stderr, "%s : loaded %zu tasks from prompt.\n", __func__, hs_task_count);
+ const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
+
// This is needed as usual for LLaMA models
- bool prepend_bos = true;
+ const bool add_bos = is_spm;
// Number of tasks to use when computing the score
if ( params.hellaswag_tasks < hs_task_count ) {
@@ -234,14 +364,13 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) {
std::vector<float> tok_logits(n_vocab);
for (size_t task_idx = 0; task_idx < hs_task_count; task_idx++) {
-
// Tokenize the context to count tokens
- std::vector<llama_token> context_embd = ::llama_tokenize(ctx, hs_data[task_idx].context, prepend_bos);
+ std::vector<llama_token> context_embd = ::llama_tokenize(ctx, hs_data[task_idx].context, add_bos);
size_t context_size = context_embd.size();
// Do the 1st ending
// In this case we include the context when evaluating
- auto query_embd = ::llama_tokenize(ctx, hs_data[task_idx].context + hs_data[task_idx].ending[0], prepend_bos);
+ auto query_embd = ::llama_tokenize(ctx, hs_data[task_idx].context + hs_data[task_idx].ending[0], add_bos);
auto query_size = query_embd.size();
//printf("First query: %d\n",(int)query_size);
@@ -369,6 +498,12 @@ int main(int argc, char ** argv) {
params.perplexity = true;
params.n_batch = std::min(params.n_batch, params.n_ctx);
+ if (params.ppl_stride > 0) {
+ fprintf(stderr, "Will perform strided perplexity calculation -> adjusting context size from %d to %d\n",
+ params.n_ctx, params.n_ctx + params.ppl_stride/2);
+ params.n_ctx += params.ppl_stride/2;
+ }
+
if (params.n_ctx > 2048) {
fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
"expect poor results\n", __func__, params.n_ctx);
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index f628d0642..d172f645a 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -14,25 +14,25 @@ struct quant_option {
};
static const std::vector<struct quant_option> QUANT_OPTIONS = {
- { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 3.50G, +0.2499 ppl @ 7B", },
- { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1846 ppl @ 7B", },
- { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.30G, +0.0796 ppl @ 7B", },
- { "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0415 ppl @ 7B", },
+ { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 3.56G, +0.2166 ppl @ LLaMA-v1-7B", },
+ { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1585 ppl @ LLaMA-v1-7B", },
+ { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.33G, +0.0683 ppl @ LLaMA-v1-7B", },
+ { "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0349 ppl @ LLaMA-v1-7B", },
#ifdef GGML_USE_K_QUANTS
- { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.67G, +0.8698 ppl @ 7B", },
+ { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", },
{ "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
- { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5505 ppl @ 7B", },
- { "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.06G, +0.2437 ppl @ 7B", },
- { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1803 ppl @ 7B", },
+ { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5551 ppl @ LLaMA-v1-7B", },
+ { "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.07G, +0.2496 ppl @ LLaMA-v1-7B", },
+ { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1764 ppl @ LLaMA-v1-7B", },
{ "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
- { "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.56G, +0.1149 ppl @ 7B", },
- { "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0535 ppl @ 7B", },
+ { "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.59G, +0.0992 ppl @ LLaMA-v1-7B", },
+ { "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0532 ppl @ LLaMA-v1-7B", },
{ "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", },
- { "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0353 ppl @ 7B", },
- { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0142 ppl @ 7B", },
- { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0044 ppl @ 7B", },
+ { "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0400 ppl @ LLaMA-v1-7B", },
+ { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", },
+ { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, -0.0008 ppl @ LLaMA-v1-7B", },
#endif
- { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ 7B", },
+ { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
{ "F16", LLAMA_FTYPE_MOSTLY_F16, "13.00G @ 7B", },
{ "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
};
diff --git a/examples/reason-act.sh b/examples/reason-act.sh
index e7fe655db..046c48db5 100755
--- a/examples/reason-act.sh
+++ b/examples/reason-act.sh
@@ -1,4 +1,3 @@
-
#!/bin/bash
cd `dirname $0`
diff --git a/examples/server-llama2-13B.sh b/examples/server-llama2-13B.sh
old mode 100644
new mode 100755
diff --git a/examples/server/README.md b/examples/server/README.md
index 4d97db2e4..77997f98d 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -126,7 +126,7 @@ node .
`stream`: It allows receiving each predicted token in real-time instead of waiting for the completion to finish. To enable this, set to `true`.
- `prompt`: Provide a prompt. Internally, the prompt is compared, and it detects if a part has already been evaluated, and the remaining part will be evaluate. A space is inserted in the front like main.cpp does.
+ `prompt`: Provide a prompt as a string, or as an array of strings and numbers representing tokens. Internally, the prompt is compared to the previously evaluated one, and only the part that has not been evaluated yet is evaluated. If the prompt is a string, or an array with the first element given as a string, a space is inserted in the front like main.cpp does.
`stop`: Specify a JSON array of stopping strings.
These words will not be included in the completion, so make sure to add them to the prompt for the next iteration (default: []).
diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index aa325a03e..ed19237b0 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import argparse
from flask import Flask, jsonify, request, Response
import urllib.parse
diff --git a/examples/server/chat-llama2.sh b/examples/server/chat-llama2.sh
old mode 100644
new mode 100755
diff --git a/examples/server/chat.sh b/examples/server/chat.sh
old mode 100644
new mode 100755
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index e5bc52cd0..1e6d10c1d 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -190,6 +190,7 @@ struct llama_server_context
size_t n_past = 0;
size_t n_remain = 0;
+ json prompt;
std::vector<llama_token> embd;
std::vector<llama_token> last_n_tokens;
@@ -267,6 +268,53 @@ struct llama_server_context
return true;
}
+ std::vector<llama_token> tokenize(json json_prompt, bool add_bos)
+ {
+ // If `add_bos` is true, we only add BOS when json_prompt is a string
+ // or when the first element of the json_prompt array is a string.
+ std::vector<llama_token> prompt_tokens;
+
+ if (json_prompt.is_array())
+ {
+ bool first = true;
+ for (const auto& p : json_prompt)
+ {
+ if (p.is_string())
+ {
+ auto s = p.template get<std::string>();
+ std::vector<llama_token> p;
+ if (first)
+ {
+ s.insert(0, 1, ' '); // add a space if it's the first
+ p = ::llama_tokenize(ctx, s, add_bos);
+ first = false;
+ }
+ else
+ {
+ p = ::llama_tokenize(ctx, s, false);
+ }
+ prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
+ }
+ else
+ {
+ if (first)
+ {
+ first = false;
+ }
+ prompt_tokens.push_back(p.template get<llama_token>());
+ }
+ }
+ }
+ else
+ {
+ auto s = json_prompt.template get<std::string>();
+ s.insert(0, 1, ' '); // always add a first space
+ prompt_tokens = ::llama_tokenize(ctx, s, add_bos);
+ }
+
+ return prompt_tokens;
+ }
+
bool loadGrammar()
{
if (!params.grammar.empty()) {
@@ -294,8 +342,8 @@ struct llama_server_context
void loadPrompt()
{
- params.prompt.insert(0, 1, ' '); // always add a first space
- std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
+ auto prompt_tokens = tokenize(prompt, true); // always add BOS
+
num_prompt_tokens = prompt_tokens.size();
if (params.n_keep < 0)
@@ -1016,7 +1064,7 @@ static json format_final_response(llama_server_context &llama, const std::string
{"tokens_predicted", llama.num_tokens_predicted},
{"tokens_evaluated", llama.num_prompt_tokens},
{"generation_settings", format_generation_settings(llama)},
- {"prompt", llama.params.prompt},
+ {"prompt", llama.prompt},
{"truncated", llama.truncated},
{"stopped_eos", llama.stopped_eos},
{"stopped_word", llama.stopped_word},
@@ -1085,10 +1133,18 @@ static void parse_options_completion(const json &body, llama_server_context &lla
llama.params.penalize_nl = json_value(body, "penalize_nl", default_params.penalize_nl);
llama.params.n_keep = json_value(body, "n_keep", default_params.n_keep);
llama.params.seed = json_value(body, "seed", default_params.seed);
- llama.params.prompt = json_value(body, "prompt", default_params.prompt);
llama.params.grammar = json_value(body, "grammar", default_params.grammar);
llama.params.n_probs = json_value(body, "n_probs", default_params.n_probs);
+ if (body.count("prompt") != 0)
+ {
+ llama.prompt = body["prompt"];
+ }
+ else
+ {
+ llama.prompt = "";
+ }
+
llama.params.logit_bias.clear();
if (json_value(body, "ignore_eos", false))
{
@@ -1345,8 +1401,11 @@ int main(int argc, char **argv)
auto lock = llama.lock();
const json body = json::parse(req.body);
- const std::string content = json_value(body, "content", "");
- const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
+ std::vector<llama_token> tokens;
+ if (body.count("content") != 0)
+ {
+ tokens = llama.tokenize(body["content"], false);
+ }
const json data = format_tokenizer_response(tokens);
return res.set_content(data.dump(), "application/json"); });
@@ -1358,7 +1417,14 @@ int main(int argc, char **argv)
llama.rewind();
llama_reset_timings(llama.ctx);
- llama.params.prompt = json_value(body, "content", "");
+ if (body.count("content") != 0)
+ {
+ llama.prompt = body["content"];
+ }
+ else
+ {
+ llama.prompt = "";
+ }
llama.params.n_predict = 0;
llama.loadPrompt();
llama.beginCompletion();
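
The new `tokenize(json, add_bos)` helper above accepts a prompt that mixes plain strings and raw token ids. A hedged usage sketch with nlohmann::json (the same JSON library server.cpp uses); the token values are made up for illustration:

```cpp
#include <nlohmann/json.hpp>
using json = nlohmann::json;

// Sketch of what a mixed string/token prompt looks like in the request body.
// Only the first string element gets the leading space and (optionally) BOS.
json make_mixed_prompt() {
    // equivalent to: "prompt": ["You are", 12, 34, " a helpful assistant."]
    json prompt = json::array();
    prompt.push_back("You are");               // tokenized with a leading space (+ BOS if add_bos)
    prompt.push_back(12);                       // raw token id, passed through as-is (made-up value)
    prompt.push_back(34);                       // raw token id (made-up value)
    prompt.push_back(" a helpful assistant.");  // tokenized without BOS
    return prompt;
}
```
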
diff --git a/ggml-alloc.c b/ggml-alloc.c
index f06f9a3c1..af4affa4e 100644
--- a/ggml-alloc.c
+++ b/ggml-alloc.c
@@ -68,7 +68,7 @@ struct ggml_allocr {
size_t max_size;
bool measure;
int parse_seq[GGML_MAX_NODES];
- bool has_parse_seq;
+ int parse_seq_len;
#ifdef GGML_ALLOCATOR_DEBUG
struct ggml_tensor * allocated_tensors[1024];
@@ -238,15 +238,11 @@ static void ggml_allocator_free_tensor(struct ggml_allocr * alloc, struct ggml_t
alloc->n_free_blocks++;
}
-void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, int * list, int n) {
- int pos = 0;
+void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n) {
for (int i = 0; i < n; i++) {
- if (list[i] != -1) {
- alloc->parse_seq[pos] = list[i];
- pos++;
- }
+ alloc->parse_seq[i] = list[i];
}
- alloc->has_parse_seq = true;
+ alloc->parse_seq_len = n;
}
void ggml_allocr_reset(struct ggml_allocr * alloc) {
@@ -269,7 +265,7 @@ struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment)
/*.max_size = */ 0,
/*.measure = */ false,
/*.parse_seq = */ {0},
- /*.has_parse_seq = */ false,
+ /*.parse_seq_len = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ = {0},
#endif
@@ -298,7 +294,7 @@ struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
/*.max_size = */ 0,
/*.measure = */ true,
/*.parse_seq = */ {0},
- /*.has_parse_seq = */ false,
+ /*.parse_seq_len = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ = {0},
#endif
@@ -445,8 +441,8 @@ static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node)
else {
AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
node->data = parent->data;
+ return;
}
- return;
}
}
}
@@ -497,69 +493,86 @@ static size_t ggml_allocator_alloc_graph_tensors_n(
allocate_node(alloc, input);
}
}
- for (int ind = 0; ind < gf->n_nodes; ind++) {
- int i;
- if (alloc->has_parse_seq) {
- i = alloc->parse_seq[ind];
- } else {
- i = ind;
- }
- struct ggml_tensor * node = gf->nodes[i];
+ // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
+ int last_barrier_pos = 0;
+ int n_nodes = alloc->parse_seq_len ? alloc->parse_seq_len : gf->n_nodes;
- // allocate parents (leafs)
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- struct ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
+ for (int ind = 0; ind < n_nodes; ind++) {
+ // allocate a node if there is no parse_seq or this is not a barrier
+ if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) {
+ int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind;
+ struct ggml_tensor * node = gf->nodes[i];
+
+ // allocate parents (leafs)
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ break;
+ }
+ allocate_node(alloc, parent);
}
- allocate_node(alloc, parent);
+
+ // allocate node
+ allocate_node(alloc, node);
+
+ AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ break;
+ }
+ AT_PRINTF("%s", parent->name);
+ if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+ AT_PRINTF(", ");
+ }
+ }
+ AT_PRINTF("\n");
}
- // allocate node
- allocate_node(alloc, node);
-
- AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- struct ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- AT_PRINTF("%s", parent->name);
- if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
- AT_PRINTF(", ");
- }
- }
- AT_PRINTF("\n");
// update parents
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- struct ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
+ // update immediately if there is no parse_seq
+ // update only at barriers if there is parse_seq
+ if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] == -1) {
+ int update_start = alloc->parse_seq_len ? last_barrier_pos : ind;
+ int update_end = alloc->parse_seq_len ? ind : ind + 1;
+ for (int i = update_start; i < update_end; i++) {
+ int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i;
+ struct ggml_tensor * node = gf->nodes[node_i];
+
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ break;
+ }
+ struct hash_node * p_hn = hash_get(ht, parent);
+ p_hn->n_children -= 1;
+
+ //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
+
+ if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+ if (ggml_is_view(parent)) {
+ struct ggml_tensor * view_src = get_view_source(parent);
+ struct hash_node * view_src_hn = hash_get(ht, view_src);
+ view_src_hn->n_views -= 1;
+ AT_PRINTF("view_src %s\n", view_src->name);
+ if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
+ ggml_allocator_free_tensor(alloc, view_src);
+ }
+ }
+ else {
+ if (parent->data != node->data) {
+ ggml_allocator_free_tensor(alloc, parent);
+ }
+ }
+ }
+ }
}
- struct hash_node * p_hn = hash_get(ht, parent);
- p_hn->n_children -= 1;
-
- //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
-
- if (p_hn->n_children == 0 && p_hn->n_views == 0) {
- if (ggml_is_view(parent)) {
- struct ggml_tensor * view_src = get_view_source(parent);
- struct hash_node * view_src_hn = hash_get(ht, view_src);
- view_src_hn->n_views -= 1;
- AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src->n_children, view_src->n_views);
- if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
- ggml_allocator_free_tensor(alloc, view_src);
- }
- }
- else {
- if (parent->data != node->data) {
- ggml_allocator_free_tensor(alloc, parent);
- }
- }
+ AT_PRINTF("\n");
+ if (alloc->parse_seq_len) {
+ last_barrier_pos = ind + 1;
}
}
- AT_PRINTF("\n");
}
// free graph outputs here that wouldn't be freed otherwise because they have no children
if (outputs != NULL && outputs[g] != NULL) {
diff --git a/ggml-alloc.h b/ggml-alloc.h
index 14a4350ac..9559da758 100644
--- a/ggml-alloc.h
+++ b/ggml-alloc.h
@@ -12,7 +12,7 @@ GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);
// tell the allocator to parse nodes following the order described in the list
// you should call this if your graph are optimized to execute out-of-order
-GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, int * list, int n);
+GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n);
GGML_API void ggml_allocr_free(struct ggml_allocr * alloc);
GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc);
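
With this change, `parse_seq` entries of `-1` act as barriers: the nodes listed between two barriers are allocated in list order, and their parents are only freed once the barrier is reached. A hedged sketch of how a caller might build such a list for `ggml_allocr_set_parse_seq` (the node indices are invented for illustration):

```cpp
#include <vector>

std::vector<int> build_parse_seq() {
    std::vector<int> seq;
    // first concurrency group: nodes 0, 2, 1 may execute out of order
    seq.insert(seq.end(), {0, 2, 1});
    seq.push_back(-1);             // barrier: safe point to free their inputs
    // second group
    seq.insert(seq.end(), {3, 4});
    seq.push_back(-1);
    return seq;                    // pass as: ggml_allocr_set_parse_seq(alloc, seq.data(), (int) seq.size());
}
```
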
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 70a950bb5..868b7a7b9 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -3907,6 +3907,29 @@ static __global__ void rope_f32(const float * x, float * dst, const int ncols, c
dst[i + 1] = x0*sin_theta + x1*cos_theta;
}
+// TODO: this implementation is wrong!
+//static __global__ void rope_neox_f32(const float * x, float * dst, const int ncols, const float p0,
+// const float p_delta, const int p_delta_rows, const float theta_scale) {
+// const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
+//
+// if (col >= ncols) {
+// return;
+// }
+//
+// const int row = blockDim.x*blockIdx.x + threadIdx.x;
+// const int i = row*ncols + col/2;
+//
+// const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
+// const float sin_theta = sinf(theta);
+// const float cos_theta = cosf(theta);
+//
+// const float x0 = x[i + 0];
+// const float x1 = x[i + ncols/2];
+//
+// dst[i + 0] = x0*cos_theta - x1*sin_theta;
+// dst[i + ncols/2] = x0*sin_theta + x1*cos_theta;
+//}
+
static __global__ void rope_glm_f32(const float * x, float * dst, const int ncols, const float p, const float block_p, const float theta_scale) {
const int col = blockDim.x*blockIdx.x + threadIdx.x;
const int half_n_dims = ncols/4;
@@ -5515,7 +5538,8 @@ inline void ggml_cuda_op_rope(
const float theta_scale = powf(freq_base, -2.0f/n_dims);
- const bool is_glm = mode & 4;
+ const bool is_neox = mode & 2;
+ const bool is_glm = mode & 4;
// compute
if (is_glm) {
@@ -5523,6 +5547,9 @@ inline void ggml_cuda_op_rope(
const float id_p = min(p, n_ctx - 2.f);
const float block_p = max(p - (n_ctx - 2.f), 0.f);
rope_glm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, id_p, block_p, theta_scale, cudaStream_main);
+ } else if (is_neox) {
+ GGML_ASSERT(false && "RoPE NeoX not implemented yet");
+#pragma message("TODO: implement RoPE NeoX for CUDA")
} else {
const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
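
For reference, the difference the `is_neox` flag distinguishes: the default RoPE mode rotates adjacent pairs `(x[2i], x[2i+1])`, while the NeoX/GPT-NeoX layout pairs element `i` of the first half of the head dimension with element `i + n_dims/2` of the second half, which is what the CPU path in ggml.c and the new Metal branch implement. A minimal CPU-side sketch of the NeoX pairing (illustrative only, not the missing CUDA kernel):

```cpp
#include <cmath>

// Rotate one row of n_dims floats using the NeoX first-half/second-half pairing.
void rope_neox_row(float * x, int n_dims, float theta_base, float theta_scale) {
    float theta = theta_base;
    for (int ic = 0; ic < n_dims; ic += 2) {
        const float cos_theta = std::cos(theta);
        const float sin_theta = std::sin(theta);
        theta *= theta_scale;

        const int   i0 = ic/2;               // index into the first half
        const float x0 = x[i0];
        const float x1 = x[i0 + n_dims/2];   // paired element from the second half

        x[i0]            = x0*cos_theta - x1*sin_theta;
        x[i0 + n_dims/2] = x0*sin_theta + x1*cos_theta;
    }
}
```
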
diff --git a/ggml-metal.m b/ggml-metal.m
index 835c5f297..06eb3872e 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -63,6 +63,7 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(get_rows_f16);
GGML_METAL_DECL_KERNEL(get_rows_q4_0);
GGML_METAL_DECL_KERNEL(get_rows_q4_1);
+ GGML_METAL_DECL_KERNEL(get_rows_q8_0);
GGML_METAL_DECL_KERNEL(get_rows_q2_K);
GGML_METAL_DECL_KERNEL(get_rows_q3_K);
GGML_METAL_DECL_KERNEL(get_rows_q4_K);
@@ -73,6 +74,7 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(mul_mat_f16_f32);
GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
+ GGML_METAL_DECL_KERNEL(mul_mat_q8_0_f32);
GGML_METAL_DECL_KERNEL(mul_mat_q2_K_f32);
GGML_METAL_DECL_KERNEL(mul_mat_q3_K_f32);
GGML_METAL_DECL_KERNEL(mul_mat_q4_K_f32);
@@ -81,6 +83,7 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(mul_mm_f16_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q4_1_f32);
+ GGML_METAL_DECL_KERNEL(mul_mm_q8_0_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q2_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q3_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32);
@@ -167,7 +170,9 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
#define GGML_METAL_ADD_KERNEL(name) \
ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \
- fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name); \
+ fprintf(stderr, "%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \
+ (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \
+ (int) ctx->pipeline_##name.threadExecutionWidth); \
if (error) { \
fprintf(stderr, "%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
return NULL; \
@@ -186,6 +191,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(get_rows_f16);
GGML_METAL_ADD_KERNEL(get_rows_q4_0);
GGML_METAL_ADD_KERNEL(get_rows_q4_1);
+ GGML_METAL_ADD_KERNEL(get_rows_q8_0);
GGML_METAL_ADD_KERNEL(get_rows_q2_K);
GGML_METAL_ADD_KERNEL(get_rows_q3_K);
GGML_METAL_ADD_KERNEL(get_rows_q4_K);
@@ -196,6 +202,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(mul_mat_f16_f32);
GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
+ GGML_METAL_ADD_KERNEL(mul_mat_q8_0_f32);
GGML_METAL_ADD_KERNEL(mul_mat_q2_K_f32);
GGML_METAL_ADD_KERNEL(mul_mat_q3_K_f32);
GGML_METAL_ADD_KERNEL(mul_mat_q4_K_f32);
@@ -203,6 +210,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(mul_mat_q6_K_f32);
GGML_METAL_ADD_KERNEL(mul_mm_f16_f32);
GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q8_0_f32);
GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32);
GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32);
GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32);
@@ -218,12 +226,12 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
#undef GGML_METAL_ADD_KERNEL
}
- fprintf(stderr, "%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
- fprintf(stderr, "%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
+ fprintf(stderr, "%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
+ fprintf(stderr, "%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
if (ctx->device.maxTransferRate != 0) {
- fprintf(stderr, "%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
+ fprintf(stderr, "%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
} else {
- fprintf(stderr, "%s: maxTransferRate = built-in GPU\n", __func__);
+ fprintf(stderr, "%s: maxTransferRate = built-in GPU\n", __func__);
}
return ctx;
@@ -537,8 +545,8 @@ void ggml_metal_graph_compute(
id<MTLComputeCommandEncoder> encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
- const int node_start = (cb_idx + 0) * n_nodes_per_cb;
- const int node_end = (cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb;
+ const int node_start = (cb_idx + 0) * n_nodes_per_cb;
+ const int node_end = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes);
for (int ind = node_start; ind < node_end; ++ind) {
const int i = has_concur ? ctx->concur_list[ind] : ind;
@@ -744,32 +752,32 @@ void ggml_metal_graph_compute(
[ctx->device supportsFamily:MTLGPUFamilyApple7] &&
ne00%32 == 0 &&
ne11 > 1) {
- switch (src0->type) {
- case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32]; break;
- case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break;
- case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break;
- case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break;
- case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break;
- case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break;
- case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break;
- case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break;
- default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
- }
- [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
- [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
- [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
- [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
- [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
- [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
- [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
- [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
- [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:8];
- [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:9];
- [encoder setBytes:&gqa length:sizeof(gqa) atIndex:10];
- [encoder setThreadgroupMemoryLength:8192 atIndex:0];
- [encoder dispatchThreadgroups:MTLSizeMake( (ne11+31)/32, (ne01+63) / 64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
+ switch (src0->type) {
+ case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32]; break;
+ case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break;
+ case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break;
+ case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q8_0_f32]; break;
+ case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break;
+ case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break;
+ case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break;
+ case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break;
+ case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break;
+ default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
}
- else {
+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+ [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+ [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+ [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+ [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
+ [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
+ [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
+ [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
+ [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:8];
+ [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:9];
+ [encoder setBytes:&gqa length:sizeof(gqa) atIndex:10];
+ [encoder setThreadgroupMemoryLength:8192 atIndex:0];
+ [encoder dispatchThreadgroups:MTLSizeMake( (ne11+31)/32, (ne01+63) / 64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
+ } else {
int nth0 = 32;
int nth1 = 1;
@@ -799,6 +807,15 @@ void ggml_metal_graph_compute(
nth1 = 8;
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_1_f32];
} break;
+ case GGML_TYPE_Q8_0:
+ {
+ GGML_ASSERT(ne02 == 1);
+ GGML_ASSERT(ne12 == 1);
+
+ nth0 = 8;
+ nth1 = 8;
+ [encoder setComputePipelineState:ctx->pipeline_mul_mat_q8_0_f32];
+ } break;
case GGML_TYPE_Q2_K:
{
GGML_ASSERT(ne02 == 1);
@@ -868,24 +885,24 @@ void ggml_metal_graph_compute(
[encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:15];
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16];
- [encoder setBytes:&gqa length:sizeof(gqa) atIndex:17];
+ [encoder setBytes:&gqa length:sizeof(gqa) atIndex:17];
- if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
+ if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q8_0 ||
src0t == GGML_TYPE_Q2_K || src0t == GGML_TYPE_Q4_K) {
- [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7) / 8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
else if (src0t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
- [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
- [encoder dispatchThreadgroups:MTLSizeMake((ne01+3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
}
else if (src0t == GGML_TYPE_Q5_K) {
- [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
else if (src0t == GGML_TYPE_Q6_K) {
- [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
} else {
[encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
@@ -895,9 +912,10 @@ void ggml_metal_graph_compute(
case GGML_OP_GET_ROWS:
{
switch (src0->type) {
- case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break;
+ case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break;
case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break;
case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break;
+ case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q8_0]; break;
case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_K]; break;
case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_K]; break;
case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_K]; break;
@@ -938,16 +956,17 @@ void ggml_metal_graph_compute(
} break;
case GGML_OP_NORM:
{
- const float eps = 1e-5f;
+ float eps;
+ memcpy(&eps, dst->op_params, sizeof(float));
const int nth = 256;
[encoder setComputePipelineState:ctx->pipeline_norm];
- [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
- [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
- [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
- [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
- [encoder setBytes:&eps length:sizeof( float) atIndex:4];
+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+ [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+ [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
+ [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
+ [encoder setBytes:&eps length:sizeof( float) atIndex:4];
[encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];
const int64_t nrows = ggml_nrows(src0);
@@ -990,7 +1009,9 @@ void ggml_metal_graph_compute(
[encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
[encoder setBytes:&m0 length:sizeof( float) atIndex:18];
+
const int nth = 32;
+
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_ROPE:
@@ -1005,8 +1026,8 @@ void ggml_metal_graph_compute(
memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
[encoder setComputePipelineState:ctx->pipeline_rope];
- [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
- [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+ [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
[encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
@@ -1057,24 +1078,24 @@ void ggml_metal_graph_compute(
default: GGML_ASSERT(false && "not implemented");
}
- [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
- [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
- [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
- [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
- [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
- [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
- [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
- [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
- [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
- [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
- [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
- [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
- [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
- [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
- [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
- [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
- [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
- [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+ [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+ [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
+ [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
+ [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
+ [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
+ [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
+ [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
+ [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
+ [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
+ [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
+ [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
+ [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
+ [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
+ [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
+ [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
+ [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
+ [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
diff --git a/ggml-metal.metal b/ggml-metal.metal
index ce3541f4b..82e1a0c7a 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -18,6 +18,12 @@ typedef struct {
uint8_t qs[QK4_1 / 2]; // nibbles / quants
} block_q4_1;
+#define QK8_0 32
+typedef struct {
+ half d; // delta
+ int8_t qs[QK8_0]; // quants
+} block_q8_0;
+
kernel void kernel_add(
device const float * src0,
device const float * src1,
@@ -87,7 +93,12 @@ kernel void kernel_gelu(
device float * dst,
uint tpig[[thread_position_in_grid]]) {
float x = src0[tpig];
- dst[tpig] = 0.5f*x*(1.0f + tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
+
+ // BEWARE !!!
+ // Simply using "tanh" instead of "precise::tanh" will sometimes results in NaNs!
+ // This was observed with Falcon 7B and 40B models
+ //
+ dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
kernel void kernel_soft_max(
@@ -352,7 +363,7 @@ void mul_vec_q_n_f32(device const void * src0, device const float * src1, device
const int first_row = (r0 * nsg + sgitg) * nr;
const uint offset0 = first_row * nb + im/gqa*(nb*ne0);
device const block_q_type * x = (device const block_q_type *) src0 + offset0;
- device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
+ device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float yl[16]; // src1 vector cache
float sumf[nr]={0.f};
@@ -424,6 +435,68 @@ kernel void kernel_mul_mat_q4_1_f32(
mul_vec_q_n_f32(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg);
}
+kernel void kernel_mul_mat_q8_0_f32(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01[[buffer(4)]],
+ constant int64_t & ne02[[buffer(5)]],
+ constant int64_t & ne10[[buffer(9)]],
+ constant int64_t & ne12[[buffer(11)]],
+ constant int64_t & ne0[[buffer(15)]],
+ constant int64_t & ne1[[buffer(16)]],
+ constant uint & gqa[[buffer(17)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]]) {
+ const int nr = N_DST;
+ const int nsg = N_SIMDGROUP;
+ const int nw = N_SIMDWIDTH;
+
+ const int nb = ne00/QK8_0;
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+ const int first_row = (r0 * nsg + sgitg) * nr;
+ const uint offset0 = first_row * nb + im/gqa*(nb*ne0);
+ device const block_q8_0 * x = (device const block_q8_0 *) src0 + offset0;
+ device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
+
+ float yl[16];
+ float sumf[nr]={0.f};
+
+ const int ix = tiisg/2;
+ const int il = tiisg%2;
+
+ device const float * yb = y + ix * QK8_0 + 16*il;
+
+ // each thread in a SIMD group deals with half a block.
+ for (int ib = ix; ib < nb; ib += nw/2) {
+ for (int i = 0; i < 16; ++i) {
+ yl[i] = yb[i];
+ }
+
+ for (int row = 0; row < nr; row++) {
+ device const int8_t * qs = x[ib+row*nb].qs + 16*il;
+ float sumq = 0.f;
+ for (int iq = 0; iq < 16; ++iq) {
+ sumq += qs[iq] * yl[iq];
+ }
+ sumf[row] += sumq*x[ib+row*nb].d;
+ }
+
+ yb += QK8_0 * 16;
+ }
+
+ for (int row = 0; row < nr; ++row) {
+ const float tot = simd_sum(sumf[row]);
+ if (tiisg == 0 && first_row + row < ne01) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot;
+ }
+ }
+}
+
kernel void kernel_mul_mat_f16_f32(
device const char * src0,
device const char * src1,
@@ -475,7 +548,6 @@ kernel void kernel_mul_mat_f16_f32(
}
}
-
kernel void kernel_alibi_f32(
device const float * src0,
device float * dst,
@@ -571,7 +643,25 @@ kernel void kernel_rope(
dst_data[1] = x0*sin_theta + x1*cos_theta;
}
} else {
- // TODO: implement
+ for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
+ for (int64_t ic = 0; ic < n_dims; ic += 2) {
+ const float cos_theta = cos(theta);
+ const float sin_theta = sin(theta);
+
+ theta *= theta_scale;
+
+ const int64_t i0 = ib*n_dims + ic/2;
+
+ device const float * const src = (device float *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ device float * dst_data = (device float *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float x0 = src[0];
+ const float x1 = src[n_dims/2];
+
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
+ }
+ }
}
}
@@ -1598,12 +1688,12 @@ template
void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) {
device const uint16_t * qs = ((device const uint16_t *)xb + 1);
const half d = il ? (xb->d / 16.h) : xb->d;
- const half m = il ? (-8.h * 16.h) : -8.h;
+ const half m = il ? ( -8.h * 16.h) : -8.h;
const ushort mask0 = il ? 0x00F0 : 0x000F;
const ushort mask1 = il ? 0xF000 : 0x0F00;
for (int i=0;i<8;i++) {
- reg[i/2][2*(i%2)] = (((qs[i] & mask0)) + m) * d;
+ reg[i/2][2*(i%2)] = (((qs[i] & mask0) ) + m) * d;
reg[i/2][2*(i%2)+1] = (((qs[i] & mask1) >> 8) + m) * d;
}
}
@@ -1617,11 +1707,21 @@ void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg
const ushort mask1 = il ? 0xF000 : 0x0F00;
for (int i=0;i<8;i++) {
- reg[i/2][2*(i%2)] = (((qs[i] & mask0)) * d) + m;
+ reg[i/2][2*(i%2)] = (((qs[i] & mask0) ) * d) + m;
reg[i/2][2*(i%2)+1] = (((qs[i] & mask1) >> 8) * d) + m;
}
}
+template <typename type4x4>
+void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) {
+ device const int8_t * qs = ((device const int8_t *)xb->qs);
+ const half d = xb->d;
+
+ for (int i=0;i<16;i++) {
+ reg[i/4][i%4] = (qs[i + 16*il] * d);
+ }
+}
+
template <typename type4x4>
void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) {
const half d = xb->d;
@@ -1924,9 +2024,10 @@ kernel void kernel_mul_mm(device const uchar * src0,
typedef void (get_rows_t)(device const void *, device const int *, device float *, constant int64_t &, \
constant uint64_t &, constant uint64_t &, uint, uint, uint);
-template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows;
+template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows;
template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows;
template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows;
+template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_t kernel_get_rows;
template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows;
template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows;
template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_t kernel_get_rows;
@@ -1937,9 +2038,10 @@ typedef void (mat_mm_t)(device const uchar *, device const float *, device float
constant int64_t &, constant int64_t &, constant int64_t &, constant int64_t &, \
constant int64_t &, constant int64_t &, constant uint &, threadgroup uchar *, uint3, uint, uint);
-template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm;
+template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm;
template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm;
template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm;
+template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mat_mm_t kernel_mul_mm;
template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm;
template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm;
template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mat_mm_t kernel_mul_mm;
diff --git a/ggml.c b/ggml.c
index dffb97731..8cb5c404f 100644
--- a/ggml.c
+++ b/ggml.c
@@ -3554,9 +3554,9 @@ inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) {
inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
-static const float GELU_COEF_A = 0.044715f;
-static const float GELU_QUICK_COEF = -1.702f;
-static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
+static const float GELU_COEF_A = 0.044715f;
+static const float GELU_QUICK_COEF = -1.702f;
+static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
inline static float ggml_gelu_f32(float x) {
return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
@@ -5555,10 +5555,6 @@ struct ggml_tensor * ggml_repeat(
is_node = true;
}
- if (ggml_are_same_shape(a, b) && !is_node) {
- return a;
- }
-
struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);
result->op = GGML_OP_REPEAT;
@@ -5789,6 +5785,7 @@ struct ggml_tensor * ggml_silu_back(
static struct ggml_tensor * ggml_norm_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
+ float eps,
bool inplace) {
bool is_node = false;
@@ -5799,7 +5796,7 @@ static struct ggml_tensor * ggml_norm_impl(
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- // TODO: maybe store epsilon here?
+ ggml_set_op_params(result, &eps, sizeof(eps));
result->op = GGML_OP_NORM;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -5810,14 +5807,16 @@ static struct ggml_tensor * ggml_norm_impl(
struct ggml_tensor * ggml_norm(
struct ggml_context * ctx,
- struct ggml_tensor * a) {
- return ggml_norm_impl(ctx, a, false);
+ struct ggml_tensor * a,
+ float eps) {
+ return ggml_norm_impl(ctx, a, eps, false);
}
struct ggml_tensor * ggml_norm_inplace(
struct ggml_context * ctx,
- struct ggml_tensor * a) {
- return ggml_norm_impl(ctx, a, true);
+ struct ggml_tensor * a,
+ float eps) {
+ return ggml_norm_impl(ctx, a, eps, true);
}
// ggml_rms_norm
@@ -10619,7 +10618,8 @@ static void ggml_compute_forward_norm_f32(
GGML_TENSOR_UNARY_OP_LOCALS;
- const float eps = 1e-5f; // TODO: make this a parameter
+ float eps;
+ memcpy(&eps, dst->op_params, sizeof(float));
// TODO: optimize
for (int64_t i03 = 0; i03 < ne03; i03++) {
@@ -12537,7 +12537,7 @@ static void ggml_compute_forward_rope_f32(
dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
}
} else {
- // TODO: this is probably wrong, but I can't figure it out ..
+ // TODO: this might be wrong for ne0 != n_dims - need double check
// ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
for (int64_t ic = 0; ic < n_dims; ic += 2) {
@@ -12666,7 +12666,7 @@ static void ggml_compute_forward_rope_f16(
dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
}
} else {
- // TODO: this is probably wrong, but I can't figure it out ..
+ // TODO: this might be wrong for ne0 != n_dims - need double check
// ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
for (int64_t ic = 0; ic < n_dims; ic += 2) {
diff --git a/ggml.h b/ggml.h
index 3c48fd27f..421c0df60 100644
--- a/ggml.h
+++ b/ggml.h
@@ -909,14 +909,15 @@ extern "C" {
struct ggml_tensor * b);
// normalize along rows
- // TODO: eps is hardcoded to 1e-5 for now
GGML_API struct ggml_tensor * ggml_norm(
struct ggml_context * ctx,
- struct ggml_tensor * a);
+ struct ggml_tensor * a,
+ float eps);
GGML_API struct ggml_tensor * ggml_norm_inplace(
struct ggml_context * ctx,
- struct ggml_tensor * a);
+ struct ggml_tensor * a,
+ float eps);
GGML_API struct ggml_tensor * ggml_rms_norm(
struct ggml_context * ctx,
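
Since `ggml_norm` now takes the epsilon explicitly, existing call sites have to pass a value instead of relying on the previously hard-coded `1e-5f`. A call-site sketch for the updated API (the variable names are placeholders, not llama.cpp code):

```cpp
#include "ggml.h"

struct ggml_tensor * apply_norm(struct ggml_context * ctx0, struct ggml_tensor * cur) {
    const float norm_eps = 1e-5f;          // was fixed inside ggml_norm before this change
    return ggml_norm(ctx0, cur, norm_eps); // old signature: ggml_norm(ctx0, cur)
}
```
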
diff --git a/gguf.py b/gguf.py
old mode 100644
new mode 100755
index 465746718..5c37f0f0b
--- a/gguf.py
+++ b/gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import shutil
import sys
import struct
@@ -29,12 +30,12 @@ KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository"
KEY_GENERAL_FILE_TYPE = "general.file_type"
# LLM
-KEY_LLM_CONTEXT_LENGTH = "{arch}.context_length"
-KEY_LLM_EMBEDDING_LENGTH = "{arch}.embedding_length"
-KEY_LLM_BLOCK_COUNT = "{arch}.block_count"
-KEY_LLM_FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
-KEY_LLM_USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
-KEY_LLM_TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
+KEY_CONTEXT_LENGTH = "{arch}.context_length"
+KEY_EMBEDDING_LENGTH = "{arch}.embedding_length"
+KEY_BLOCK_COUNT = "{arch}.block_count"
+KEY_FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
+KEY_USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
+KEY_TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
# attention
KEY_ATTENTION_HEAD_COUNT = "{arch}.attention.head_count"
@@ -582,7 +583,7 @@ class GGUFWriter:
self.add_string(KEY_GENERAL_AUTHOR, author)
def add_tensor_data_layout(self, layout: str):
- self.add_string(KEY_LLM_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)
+ self.add_string(KEY_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)
def add_url(self, url: str):
self.add_string(KEY_GENERAL_URL, url)
@@ -612,27 +613,27 @@ class GGUFWriter:
def add_context_length(self, length: int):
self.add_uint32(
- KEY_LLM_CONTEXT_LENGTH.format(arch=self.arch), length)
+ KEY_CONTEXT_LENGTH.format(arch=self.arch), length)
def add_embedding_length(self, length: int):
self.add_uint32(
- KEY_LLM_EMBEDDING_LENGTH.format(arch=self.arch), length)
+ KEY_EMBEDDING_LENGTH.format(arch=self.arch), length)
def add_block_count(self, length: int):
self.add_uint32(
- KEY_LLM_BLOCK_COUNT.format(arch=self.arch), length)
+ KEY_BLOCK_COUNT.format(arch=self.arch), length)
def add_feed_forward_length(self, length: int):
self.add_uint32(
- KEY_LLM_FEED_FORWARD_LENGTH.format(arch=self.arch), length)
+ KEY_FEED_FORWARD_LENGTH.format(arch=self.arch), length)
def add_parallel_residual(self, use: bool):
self.add_bool(
- KEY_LLM_USE_PARALLEL_RESIDUAL.format(arch=self.arch), use)
+ KEY_USE_PARALLEL_RESIDUAL.format(arch=self.arch), use)
def add_tensor_data_layout(self, layout: str):
self.add_string(
- KEY_LLM_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)
+ KEY_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)
def add_head_count(self, count: int):
self.add_uint32(
diff --git a/grammars/README.md b/grammars/README.md
new file mode 100644
index 000000000..7f3b11ca5
--- /dev/null
+++ b/grammars/README.md
@@ -0,0 +1,91 @@
+# GBNF Guide
+
+GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.org/wiki/Formal_grammar) to constrain model outputs in `llama.cpp`. For example, you can use it to force the model to generate valid JSON, or speak only in emojis. GBNF grammars are supported in various ways in `examples/main` and `examples/server`.
+
+## Background
+
+[Backus-Naur Form (BNF)](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) is a notation for describing the syntax of formal languages like programming languages, file formats, and protocols. GBNF is an extension of BNF that primarily adds a few modern regex-like features.
+
+## Basics
+
+In GBNF, we define *production rules* that specify how a *non-terminal* (rule name) can be replaced with sequences of *terminals* (characters, specifically Unicode [code points](https://en.wikipedia.org/wiki/Code_point)) and other non-terminals. The basic format of a production rule is `nonterminal ::= sequence...`.
+
+## Example
+
+Before going deeper, let's look at some of the features demonstrated in `grammars/chess.gbnf`, a small chess notation grammar:
+```
+# `root` specifies the pattern for the overall output
+root ::= (
+ # it must start with the characters "1. " followed by a sequence
+ # of characters that match the `move` rule, followed by a space, followed
+ # by another move, and then a newline
+ "1. " move " " move "\n"
+
+ # it's followed by one or more subsequent moves, numbered with one or two digits
+ ([1-9] [0-9]? ". " move " " move "\n")+
+)
+
+# `move` is an abstract representation, which can be a pawn, nonpawn, or castle.
+# The `[+#]?` denotes an optional check (`+`) or checkmate (`#`) sign after a move
+move ::= (pawn | nonpawn | castle) [+#]?
+
+pawn ::= ...
+nonpawn ::= ...
+castle ::= ...
+```
+
+## Non-Terminals and Terminals
+
+Non-terminal symbols (rule names) stand for a pattern of terminals and other non-terminals. They are required to be a dashed lowercase word, like `move`, `castle`, or `check-mate`.
+
+Terminals are actual characters ([code points](https://en.wikipedia.org/wiki/Code_point)). They can be specified as a sequence like `"1"` or `"O-O"` or as ranges like `[1-9]` or `[NBKQR]`.
+
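+For example, a small illustrative rule set (not taken from the bundled grammars) that mixes terminals and non-terminals:
+```
+# `verdict` combines the non-terminals `answer` and `rating` with terminal text
+answer  ::= "yes" | "no"
+rating  ::= [1-5]
+verdict ::= answer " (" rating "/5)" "\n"
+```
+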
+## Characters and character ranges
+
+Terminals support the full range of Unicode. Unicode characters can be specified directly in the grammar, for example `hiragana ::= [ぁ-ゟ]`, or with escapes: 8-bit (`\xXX`), 16-bit (`\uXXXX`) or 32-bit (`\UXXXXXXXX`).
+
+Character ranges can be negated with `^`:
+```
+single-line ::= [^\n]+ "\n"
+```
+
+## Sequences and Alternatives
+
+The order of symbols in a sequence matters. For example, in `"1. " move " " move "\n"`, the `"1. "` must come before the first `move`, etc.
+
+Alternatives, denoted by `|`, give different sequences that are acceptable. For example, in `move ::= pawn | nonpawn | castle`, `move` can be a `pawn` move, a `nonpawn` move, or a `castle`.
+
+Parentheses `()` can be used to group sequences, which allows for embedding alternatives in a larger rule or applying repetition and optional symbols (below) to a sequence.
+
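+For instance, a hypothetical rule that uses one group for an alternative prefix and another for an optional fractional part (the `?` and `+` operators are described below):
+```
+answer ::= ("Final answer: " | "Answer: ") [0-9]+ ("." [0-9]+)? "\n"
+```
+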
+## Repetition and Optional Symbols
+
+- `*` after a symbol or sequence means that it can be repeated zero or more times.
+- `+` denotes that the symbol or sequence should appear one or more times.
+- `?` makes the preceding symbol or sequence optional.
+
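+As an illustrative (hypothetical) grammar combining all three operators:
+```
+# a simple bulleted list: an optional title, then one or more items,
+# each item being one or more words
+root  ::= title? item+
+title ::= "# " [^\n]+ "\n"
+word  ::= [a-zA-Z0-9]+
+item  ::= "- " word (" " word)* "\n"
+```
+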
+## Comments and newlines
+
+Comments can be specified with `#`:
+```
+# defines optional whitespace
+ws ::= [ \t\n]+
+```
+
+Newlines are allowed between rules and between symbols or sequences nested inside parentheses. Additionally, a newline after an alternate marker `|` will continue the current rule, even outside of parentheses.
+
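+For example, in this hypothetical rule the newline after each `|` continues the same rule:
+```
+move-type ::= "rock" |
+    "paper" |
+    "scissors"
+```
+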
+## The root rule
+
+In a full grammar, the `root` rule always defines the starting point of the grammar. In other words, it specifies what the entire output must match.
+
+```
+# a grammar for lists
+root ::= ("- " item)+
+item ::= [^\n]+ "\n"
+```
+
+## Next steps
+
+This guide provides a brief overview. Check out the GBNF files in this directory (`grammars/`) for examples of full grammars. You can try them out with:
+```
+./main -m <model> --grammar-file grammars/some-grammar.gbnf -p 'Some prompt'
+```
diff --git a/llama.cpp b/llama.cpp
index 6c5da1309..b5266c1e1 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -72,6 +72,7 @@
#include
#include
#include
+#include <cinttypes>
#include
#include
#include
@@ -80,20 +81,6 @@
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
-// tensor names
-#define TN_TOKEN_EMBD "token_embd.weight"
-#define TN_OUTPUT_NORM "output_norm.weight"
-#define TN_OUTPUT "output.weight"
-#define TN_ATTN_NORM "blk.%d.attn_norm.weight"
-#define TN_ATTN_Q "blk.%d.attn_q.weight"
-#define TN_ATTN_K "blk.%d.attn_k.weight"
-#define TN_ATTN_V "blk.%d.attn_v.weight"
-#define TN_ATTN_OUTPUT "blk.%d.attn_output.weight"
-#define TN_FFN_NORM "blk.%d.ffn_norm.weight"
-#define TN_FFN_GATE "blk.%d.ffn_gate.weight"
-#define TN_FFN_DOWN "blk.%d.ffn_down.weight"
-#define TN_FFN_UP "blk.%d.ffn_up.weight"
-
#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
@@ -107,6 +94,7 @@
//
// logging
//
+
LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal (llama_log_level level, const char* format, ...);
static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);
@@ -119,6 +107,21 @@ static void llama_log_callback_default(llama_log_level level, const char * text,
// helpers
//
+static size_t utf8_len(char src) {
+ const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
+ uint8_t highbits = static_cast<uint8_t>(src) >> 4;
+ return lookup[highbits];
+}
+
+void replace_all(std::string & s, const std::string & search, const std::string & replace) {
+ for (size_t pos = 0; ; pos += replace.length()) {
+ pos = s.find(search, pos);
+ if (pos == std::string::npos) break;
+ s.erase(pos, search.length());
+ s.insert(pos, replace);
+ }
+}
+
static void zeros(std::ofstream & file, size_t n) {
char zero = 0;
for (size_t i = 0; i < n; ++i) {
@@ -142,6 +145,241 @@ static std::string format(const char * fmt, ...) {
return std::string(buf.data(), size);
}
+//
+// gguf constants (sync with gguf.py)
+//
+
+enum llm_arch {
+ LLM_ARCH_LLAMA,
+ LLM_ARCH_FALCON,
+ LLM_ARCH_GPT2,
+ LLM_ARCH_GPTJ,
+ LLM_ARCH_GPTNEOX,
+ LLM_ARCH_MPT,
+ LLM_ARCH_UNKNOWN,
+};
+
+static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
+ { LLM_ARCH_LLAMA, "llama" },
+ { LLM_ARCH_FALCON, "falcon" },
+ { LLM_ARCH_GPT2, "gpt2" },
+ { LLM_ARCH_GPTJ, "gptj" },
+ { LLM_ARCH_GPTNEOX, "gptneox" },
+ { LLM_ARCH_MPT, "mpt" },
+};
+
+enum llm_kv {
+ LLM_KV_GENERAL_ARCHITECTURE,
+ LLM_KV_GENERAL_QUANTIZATION_VERSION,
+ LLM_KV_GENERAL_ALIGNMENT,
+ LLM_KV_GENERAL_NAME,
+ LLM_KV_GENERAL_AUTHOR,
+ LLM_KV_GENERAL_URL,
+ LLM_KV_GENERAL_DESCRIPTION,
+ LLM_KV_GENERAL_LICENSE,
+ LLM_KV_GENERAL_SOURCE_URL,
+ LLM_KV_GENERAL_SOURCE_HF_REPO,
+
+ LLM_KV_CONTEXT_LENGTH,
+ LLM_KV_EMBEDDING_LENGTH,
+ LLM_KV_BLOCK_COUNT,
+ LLM_KV_FEED_FORWARD_LENGTH,
+ LLM_KV_USE_PARALLEL_RESIDUAL,
+ LLM_KV_TENSOR_DATA_LAYOUT,
+
+ LLM_KV_ATTENTION_HEAD_COUNT,
+ LLM_KV_ATTENTION_HEAD_COUNT_KV,
+ LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
+ LLM_KV_ATTENTION_CLAMP_KQV,
+ LLM_KV_ATTENTION_LAYERNORM_EPS,
+ LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
+
+ LLM_KV_ROPE_DIMENSION_COUNT,
+ LLM_KV_ROPE_SCALE_LINEAR,
+
+ LLM_KV_TOKENIZER_MODEL,
+ LLM_KV_TOKENIZER_LIST,
+ LLM_KV_TOKENIZER_TOKEN_TYPE,
+ LLM_KV_TOKENIZER_SCORES,
+ LLM_KV_TOKENIZER_MERGES,
+ LLM_KV_TOKENIZER_BOS_ID,
+ LLM_KV_TOKENIZER_EOS_ID,
+ LLM_KV_TOKENIZER_UNK_ID,
+ LLM_KV_TOKENIZER_SEP_ID,
+ LLM_KV_TOKENIZER_PAD_ID,
+ LLM_KV_TOKENIZER_HF_JSON,
+ LLM_KV_TOKENIZER_RWKV,
+};
+
+static std::map<llm_kv, std::string> LLM_KV_NAMES = {
+ { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
+ { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
+ { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
+ { LLM_KV_GENERAL_NAME, "general.name" },
+ { LLM_KV_GENERAL_AUTHOR, "general.author" },
+ { LLM_KV_GENERAL_URL, "general.url" },
+ { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
+ { LLM_KV_GENERAL_LICENSE, "general.license" },
+ { LLM_KV_GENERAL_SOURCE_URL, "general.source_url" },
+ { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source_hf_repo" },
+
+ { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
+ { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
+ { LLM_KV_BLOCK_COUNT, "%s.block_count" },
+ { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
+ { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
+ { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
+
+ { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
+ { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
+ { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
+ { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
+ { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
+ { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
+
+ { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
+ { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
+
+ { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
+ { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
+ { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
+ { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
+ { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
+ { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
+ { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
+ { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
+ { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
+ { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
+ { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
+ { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
+};
+
+struct LLM_KV {
+ LLM_KV(llm_arch arch) : arch(arch) {}
+
+ llm_arch arch;
+
+ std::string operator()(llm_kv kv) const {
+ return ::format(LLM_KV_NAMES[kv].c_str(), LLM_ARCH_NAMES[arch].c_str());
+ }
+};
+
+enum llm_tensor {
+ LLM_TENSOR_TOKEN_EMBD,
+ LLM_TENSOR_POS_EMBD,
+ LLM_TENSOR_OUTPUT,
+ LLM_TENSOR_OUTPUT_NORM,
+ LLM_TENSOR_ROPE_FREQS,
+ LLM_TENSOR_ATTN_Q,
+ LLM_TENSOR_ATTN_K,
+ LLM_TENSOR_ATTN_V,
+ LLM_TENSOR_ATTN_QKV,
+ LLM_TENSOR_ATTN_OUT,
+ LLM_TENSOR_ATTN_NORM,
+ LLM_TENSOR_ATTN_NORM_2,
+ LLM_TENSOR_ATTN_ROT_EMBD,
+ LLM_TENSOR_FFN_GATE,
+ LLM_TENSOR_FFN_DOWN,
+ LLM_TENSOR_FFN_UP,
+ LLM_TENSOR_FFN_NORM,
+};
+
+static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
+ {
+ LLM_ARCH_LLAMA,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_FALCON,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+};
+
+static llm_arch llm_arch_from_string(const std::string & name) {
+ for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
+ if (kv.second == name) {
+ return kv.first;
+ }
+ }
+
+ return LLM_ARCH_UNKNOWN;
+}
+
+// helper to handle gguf constants
+// usage:
+//
+// const auto tn = LLM_TN(LLM_ARCH_LLAMA);
+//
+// std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
+// std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
+// std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
+//
+struct LLM_TN {
+ LLM_TN(llm_arch arch) : arch(arch) {}
+
+ llm_arch arch;
+
+ std::string operator()(llm_tensor tensor) const {
+ return LLM_TENSOR_NAMES[arch].at(tensor);
+ }
+
+ std::string operator()(llm_tensor tensor, const std::string & suffix) const {
+ return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
+ }
+
+ std::string operator()(llm_tensor tensor, int bid) const {
+ return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
+ }
+
+ std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
+ return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
+ }
+};
+
+//
+// gguf helpers
+//
+
+#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
+{ \
+ const std::string skey(key); \
+ const int kid = gguf_find_key(ctx, skey.c_str()); \
+ if (kid >= 0) { \
+ enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
+ if (ktype != (type)) { \
+ throw std::runtime_error(format("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype))); \
+ } \
+ (dst) = func(ctx, kid); \
+ } else if (req) { \
+ throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
+ } \
+}
+
//
// ggml helpers
//
@@ -589,12 +827,14 @@ enum e_model {
MODEL_7B,
MODEL_13B,
MODEL_30B,
+ MODEL_34B,
+ MODEL_40B,
MODEL_65B,
MODEL_70B,
};
static const size_t kB = 1024;
-static const size_t MB = 1024*1024;
+static const size_t MB = kB*kB;
// default hparams (LLaMA 7B)
struct llama_hparams {
@@ -608,6 +848,7 @@ struct llama_hparams {
uint32_t n_rot = 64;
uint32_t n_ff = 11008;
+ float f_norm_eps = 1e-5;
float f_norm_rms_eps = 1e-5;
float rope_freq_base = 10000.0f;
@@ -641,21 +882,25 @@ struct llama_hparams {
struct llama_layer {
// normalization
- struct ggml_tensor * attention_norm;
+ struct ggml_tensor * attn_norm;
+ struct ggml_tensor * attn_norm_b;
+ struct ggml_tensor * attn_norm_2;
+ struct ggml_tensor * attn_norm_2_b;
// attention
struct ggml_tensor * wq;
struct ggml_tensor * wk;
struct ggml_tensor * wv;
struct ggml_tensor * wo;
+ struct ggml_tensor * wqkv;
// normalization
struct ggml_tensor * ffn_norm;
// ff
- struct ggml_tensor * w1;
- struct ggml_tensor * w2;
- struct ggml_tensor * w3;
+ struct ggml_tensor * w1; // ffn_gate
+ struct ggml_tensor * w2; // ffn_down
+ struct ggml_tensor * w3; // ffn_up
};
struct llama_kv_cache {
@@ -681,10 +926,6 @@ struct llama_kv_cache {
};
struct llama_vocab {
- // TODO:
- // - add a vector of merges
- // so that we can pass it to different types of tokenizers with a common interface
-
using id = int32_t;
using token = std::string;
using ttype = llama_token_type;
@@ -695,34 +936,55 @@ struct llama_vocab {
ttype type;
};
- llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
+ enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
std::unordered_map token_to_id;
std::vector id_to_token;
+ std::map<std::pair<std::string, std::string>, int> bpe_ranks;
+
// default LLaMA special tokens
id special_bos_id = 1;
id special_eos_id = 2;
- id special_unk_id = -1;
+ id special_unk_id = 0;
id special_sep_id = -1;
id special_pad_id = -1;
id linefeed_id = 13;
+
+ int find_bpe_rank(std::string token_left, std::string token_right) const {
+ replace_all(token_left, " ", "Ġ");
+ replace_all(token_left, "\n", "Ċ");
+ replace_all(token_right, " ", "Ġ");
+ replace_all(token_right, "\n", "Ċ");
+
+ auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
+ if (it == bpe_ranks.end()) {
+ return -1;
+ }
+
+ return it->second;
+ }
};
struct llama_model {
e_model type = MODEL_UNKNOWN;
+ llm_arch arch = LLM_ARCH_UNKNOWN;
llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
+ std::string name = "n/a";
+
llama_hparams hparams;
llama_vocab vocab;
struct ggml_tensor * tok_embeddings;
- struct ggml_tensor * norm;
+ struct ggml_tensor * output_norm;
+ struct ggml_tensor * output_norm_b;
struct ggml_tensor * output;
std::vector layers;
+
int n_gpu_layers;
// context
@@ -800,8 +1062,6 @@ struct llama_context {
// key + value cache for the self attention
struct llama_kv_cache kv_self;
- size_t mem_per_token = 0;
-
// decode output (2-dimensional array: [n_tokens][n_vocab])
std::vector logits;
bool logits_all = false;
@@ -880,11 +1140,11 @@ static bool llama_kv_cache_init(
// model loading and saving
//
-enum llama_file_version {
+enum llama_fver {
GGUF_FILE_VERSION_V1 = 1,
};
-static const char * llama_file_version_name(llama_file_version version) {
+static const char * llama_file_version_name(llama_fver version) {
switch (version) {
case GGUF_FILE_VERSION_V1: return "GGUF V1 (latest)";
}
@@ -892,11 +1152,11 @@ static const char * llama_file_version_name(llama_file_version version) {
return "unknown";
}
-static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
+static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
char buf[256];
- snprintf(buf, sizeof(buf), "%5u", ne.at(0));
+ snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
for (size_t i = 1; i < ne.size(); i++) {
- snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5u", ne.at(i));
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
}
return buf;
}
@@ -919,9 +1179,9 @@ struct llama_model_loader {
bool use_mmap = false;
- llama_file file;
+ llama_file file;
llama_ftype ftype;
- llama_file_version fver;
+ llama_fver fver;
std::unique_ptr mapping;
@@ -942,7 +1202,7 @@ struct llama_model_loader {
n_kv = gguf_get_n_kv(ctx_gguf);
n_tensors = gguf_get_n_tensors(ctx_gguf);
- fver = (enum llama_file_version) gguf_get_version(ctx_gguf);
+ fver = (enum llama_fver ) gguf_get_version(ctx_gguf);
for (int i = 0; i < n_tensors; i++) {
const char * name = gguf_get_tensor_name(ctx_gguf, i);
@@ -1039,6 +1299,21 @@ struct llama_model_loader {
}
}
+ std::string get_arch_name() const {
+ const auto kv = LLM_KV(LLM_ARCH_UNKNOWN);
+
+ std::string arch_name;
+ GGUF_GET_KEY(ctx_gguf, arch_name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_ARCHITECTURE));
+
+ return arch_name;
+ }
+
+ enum llm_arch get_arch() const {
+ const std::string arch_name = get_arch_name();
+
+ return llm_arch_from_string(arch_name);
+ }
+
const char * get_tensor_name(int i) const {
return gguf_get_tensor_name(ctx_gguf, i);
}
@@ -1076,7 +1351,7 @@ struct llama_model_loader {
return tensor;
}
- struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<uint32_t> & ne, ggml_backend backend) {
+ struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend backend) {
struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
if (cur == NULL) {
@@ -1244,228 +1519,281 @@ static const char * llama_model_type_name(e_model type) {
case MODEL_7B: return "7B";
case MODEL_13B: return "13B";
case MODEL_30B: return "30B";
+ case MODEL_34B: return "34B";
+ case MODEL_40B: return "40B";
case MODEL_65B: return "65B";
case MODEL_70B: return "70B";
- default: GGML_ASSERT(false);
+ default: return "?B";
}
}
-static void llama_model_load_internal(
- const std::string & fname,
+static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
+ model.arch = ml.get_arch();
+ if (model.arch == LLM_ARCH_UNKNOWN) {
+ throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
+ }
+}
+
+static void llm_load_hparams(
+ llama_model_loader & ml,
llama_model & model,
- llama_vocab & vocab,
int n_ctx,
+ float rope_freq_base,
+ float rope_freq_scale) {
+ struct gguf_context * ctx = ml.ctx_gguf;
+
+ const auto kv = LLM_KV(model.arch);
+
+ auto & hparams = model.hparams;
+
+ // get general kv
+ GGUF_GET_KEY(ctx, model.name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_NAME));
+
+ // get hparams kv
+ GGUF_GET_KEY(ctx, hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, kv(LLM_KV_TOKENIZER_LIST));
+ GGUF_GET_KEY(ctx, hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_CONTEXT_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
+ GGUF_GET_KEY(ctx, hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));
+
+ // n_head_kv is optional, default to n_head
+ hparams.n_head_kv = hparams.n_head;
+ GGUF_GET_KEY(ctx, hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV));
+
+ // TODO: manually setting rope scale should override this
+ // rope_freq_scale (inverse of the kv) is optional
+ {
+ float ropescale = 1.0f;
+ GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR));
+ if (ropescale != 1.0f) {
+ rope_freq_scale = 1.0f/ropescale;
+ }
+ }
+
+ // sanity check for n_rot (optional)
+ {
+ hparams.n_rot = hparams.n_embd / hparams.n_head;
+
+ GGUF_GET_KEY(ctx, hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ROPE_DIMENSION_COUNT));
+
+ if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
+ throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
+ }
+ }
+
+ // arch-specific KVs
+ switch (model.arch) {
+ case LLM_ARCH_LLAMA:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+
+ switch (hparams.n_layer) {
+ case 26: model.type = e_model::MODEL_3B; break;
+ case 32: model.type = e_model::MODEL_7B; break;
+ case 40: model.type = e_model::MODEL_13B; break;
+ case 48: model.type = e_model::MODEL_34B; break;
+ case 60: model.type = e_model::MODEL_30B; break;
+ case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_FALCON:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_7B; break;
+ case 60: model.type = e_model::MODEL_40B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ default: (void)0;
+ };
+
+ model.ftype = ml.ftype;
+
+ hparams.n_ctx = n_ctx;
+ hparams.rope_freq_base = rope_freq_base;
+ hparams.rope_freq_scale = rope_freq_scale;
+}
+
+// TODO: This should probably be in llama.h
+static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, const std::string & raw_text, bool bos, bool escape);
+
+static void llm_load_vocab(
+ llama_model_loader & ml,
+ llama_model & model) {
+ auto & vocab = model.vocab;
+
+ struct gguf_context * ctx = ml.ctx_gguf;
+
+ const auto kv = LLM_KV(model.arch);
+
+ const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
+ if (token_idx == -1) {
+ throw std::runtime_error("cannot find tokenizer vocab in model file\n");
+ }
+
+ const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
+ if (score_idx == -1) {
+ throw std::runtime_error("cannot find tokenizer scores in model file\n");
+ }
+
+ const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
+
+ const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
+ if (toktype_idx == -1) {
+ throw std::runtime_error("cannot find token type list in GGUF file\n");
+ }
+
+ const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
+
+ // determine vocab type
+ {
+ std::string tokenizer_name;
+
+ GGUF_GET_KEY(ctx, tokenizer_name, gguf_get_val_str, GGUF_TYPE_STRING, true, kv(LLM_KV_TOKENIZER_MODEL));
+
+ if (tokenizer_name == "llama") {
+ vocab.type = LLAMA_VOCAB_TYPE_SPM;
+
+ // default special tokens
+ vocab.special_bos_id = 1;
+ vocab.special_eos_id = 2;
+ vocab.special_unk_id = 0;
+ vocab.special_sep_id = -1;
+ vocab.special_pad_id = -1;
+ } else if (tokenizer_name == "gpt2") {
+ vocab.type = LLAMA_VOCAB_TYPE_BPE;
+
+ // read bpe merges and populate bpe ranks
+ const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
+ if (merges_keyidx == -1) {
+ throw std::runtime_error("cannot find tokenizer merges in model file\n");
+ }
+
+ const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
+
+ for (int i = 0; i < n_merges; i++) {
+ const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
+
+ std::string first;
+ std::string second;
+
+ const size_t pos = word.find(' ', 1);
+
+ if (pos != std::string::npos) {
+ first = word.substr(0, pos);
+ second = word.substr(pos + 1);
+ }
+
+ vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
+ }
+
+ // default special tokens
+ vocab.special_bos_id = 11;
+ vocab.special_eos_id = 11;
+ vocab.special_unk_id = -1;
+ vocab.special_sep_id = -1;
+ vocab.special_pad_id = -1;
+ } else {
+ LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
+ LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
+
+ vocab.type = LLAMA_VOCAB_TYPE_SPM;
+ }
+ }
+
+ const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
+
+ vocab.id_to_token.resize(n_vocab);
+
+ for (uint32_t i = 0; i < n_vocab; i++) {
+ std::string word = gguf_get_arr_str(ctx, token_idx, i);
+
+ vocab.token_to_id[word] = i;
+
+ auto & token_data = vocab.id_to_token[i];
+ token_data.text = std::move(word);
+ token_data.score = scores[i];
+ token_data.type = (llama_token_type) toktypes[i];
+ }
+
+ // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
+ vocab.linefeed_id = llama_tokenize_internal(vocab, "\n", false, false)[0];
+
+ // special tokens
+ GGUF_GET_KEY(ctx, vocab.special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_BOS_ID));
+ GGUF_GET_KEY(ctx, vocab.special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_EOS_ID));
+ GGUF_GET_KEY(ctx, vocab.special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_UNK_ID));
+ GGUF_GET_KEY(ctx, vocab.special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_SEP_ID));
+ GGUF_GET_KEY(ctx, vocab.special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_PAD_ID));
+}
+
+static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
+ const auto & hparams = model.hparams;
+ const auto & vocab = model.vocab;
+
+ // hparams
+ LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
+ LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str());
+ LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
+ LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
+ LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
+ LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
+ LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
+ LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
+ LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
+ LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
+ LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
+ LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
+ LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
+ LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
+ LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
+ LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
+ LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
+ LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
+ LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
+ LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
+ LLAMA_LOG_INFO("%s: model size = %.2f B\n", __func__, ml.n_elements*1e-9);
+
+ // general kv
+ LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
+
+ // special tokens
+ if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
+ if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
+ if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
+ if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
+ if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
+ if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
+}
+
+static void llm_load_tensors(
+ llama_model_loader & ml,
+ llama_model & model,
int n_batch,
int n_gpu_layers,
int main_gpu,
const float * tensor_split,
const bool mul_mat_q,
- float rope_freq_base,
- float rope_freq_scale,
bool low_vram,
ggml_type memory_type,
- bool use_mmap,
bool use_mlock,
- bool vocab_only,
llama_progress_callback progress_callback,
void * progress_callback_user_data) {
model.t_start_us = ggml_time_us();
- std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));
-
- model.n_gpu_layers = n_gpu_layers;
-
+ auto & ctx = model.ctx;
auto & hparams = model.hparams;
- std::string general_name = "n/a";
- std::string general_arch = "n/a";
-
- // read hparams
- {
- struct gguf_context * ctx = ml->ctx_gguf;
-
-#define GGUF_GET(dst, func, type, req, key) \
- { \
- const int kid = gguf_find_key(ctx, key); \
- if (kid >= 0) { \
- enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
- if (ktype != (type)) { \
- throw std::runtime_error(format("key %s has wrong type: %s", key, gguf_type_name(ktype))); \
- } \
- (dst) = func(ctx, kid); \
- } else if (req) { \
- throw std::runtime_error(format("key not found in model: %s", key)); \
- } \
- }
-
- std::string tokenizer_name;
- GGUF_GET(tokenizer_name, gguf_get_val_str, GGUF_TYPE_STRING, true, "tokenizer.ggml.model");
-
- if (tokenizer_name == "llama") {
- vocab.type = LLAMA_VOCAB_TYPE_SPM;
- } else if (tokenizer_name == "gpt2") {
- vocab.type = LLAMA_VOCAB_TYPE_BPE;
- } else {
- LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
- LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
- vocab.type = LLAMA_VOCAB_TYPE_SPM;
- }
-
- // get hparams kv
- GGUF_GET(hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, "tokenizer.ggml.tokens");
- GGUF_GET(hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.context_length");
- GGUF_GET(hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.embedding_length");
- GGUF_GET(hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.feed_forward_length");
- GGUF_GET(hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.attention.head_count");
- GGUF_GET(hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.block_count");
- GGUF_GET(hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.rope.dimension_count");
- GGUF_GET(hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, "llama.attention.layer_norm_rms_epsilon");
-
- // n_head_kv is optional, default to n_head
- hparams.n_head_kv = hparams.n_head;
- GGUF_GET(hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "llama.attention.head_count_kv");
-
- // TODO: manually setting rope scale should override this
- // rope_freq_scale (inverse of the kv) is optional
- float ropescale = 1.0f;
- GGUF_GET(ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, "llama.rope.scale_linear");
- if (ropescale != 1.0f) {
- rope_freq_scale = 1.0f/ropescale;
- }
-
- // get general kv
- GGUF_GET(general_name, gguf_get_val_str, GGUF_TYPE_STRING, false, "general.name");
- GGUF_GET(general_arch, gguf_get_val_str, GGUF_TYPE_STRING, false, "general.architecture");
-
- // special tokens
- GGUF_GET(vocab.special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.bos_token_id");
- GGUF_GET(vocab.special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.eos_token_id");
- GGUF_GET(vocab.special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.unknown_token_id");
- GGUF_GET(vocab.special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.separator_token_id");
- GGUF_GET(vocab.special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.padding_token_id");
-
-#undef GGUF_GET
-
- switch (hparams.n_layer) {
- case 26: model.type = e_model::MODEL_3B; break;
- case 32: model.type = e_model::MODEL_7B; break;
- case 40: model.type = e_model::MODEL_13B; break;
- case 60: model.type = e_model::MODEL_30B; break;
- case 80: model.type = e_model::MODEL_65B; break;
- default:
- {
- if (hparams.n_layer < 32) {
- model.type = e_model::MODEL_7B;
- }
- } break;
- }
-
- model.ftype = ml->ftype;
-
- hparams.n_ctx = n_ctx;
-
- // LLaMAv2
- // TODO: probably not needed
- {
- const auto n_gqa = hparams.n_gqa();
-
- if (model.type == e_model::MODEL_65B && n_gqa == 8) {
- LLAMA_LOG_WARN("%s: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
- model.type = e_model::MODEL_70B;
- }
- }
-
- hparams.rope_freq_base = rope_freq_base;
- hparams.rope_freq_scale = rope_freq_scale;
- }
-
- // read vocab
- {
- struct gguf_context * ctx = ml->ctx_gguf;
-
- vocab.id_to_token.resize(hparams.n_vocab);
-
- const int token_idx = gguf_find_key(ctx, "tokenizer.ggml.tokens");
- if (token_idx == -1) {
- throw std::runtime_error("cannot find tokenizer vocab in model file\n");
- }
-
- const int score_idx = gguf_find_key(ctx, "tokenizer.ggml.scores");
- if (score_idx == -1) {
- throw std::runtime_error("cannot find tokenizer scores in model file\n");
- }
-
- const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
-
- const int toktype_idx = gguf_find_key(ctx, "tokenizer.ggml.token_type");
- if (toktype_idx == -1) {
- throw std::runtime_error("cannot find token type list in GGUF file\n");
- }
-
- const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
-
- for (uint32_t i = 0; i < hparams.n_vocab; i++) {
- std::string word = gguf_get_arr_str(ctx, token_idx, i);
-
- vocab.token_to_id[word] = i;
-
- auto & token_data = vocab.id_to_token[i];
- token_data.text = std::move(word);
- token_data.score = scores[i];
- token_data.type = (llama_token_type) toktypes[i];
-
- // determine the newline token: 0x0A == 10 == '\n'
- if (token_data.text == "<0x0A>") {
- vocab.linefeed_id = i;
- }
- }
- }
-
- {
- // hparams
- LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml->fver));
- LLAMA_LOG_INFO("%s: arch = %s\n", __func__, general_arch.c_str());
- LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
- LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
- LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
- LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
- LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
- LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
- LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
- LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
- LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
- LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
- LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
- LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
- LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
- LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
- LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
- LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
- LLAMA_LOG_INFO("%s: model size = %.2f B\n", __func__, ml->n_elements*1e-9);
-
- // general kv
- LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, general_name.c_str());
-
- // special tokens
- if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
- if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
- if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
- if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
- if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
- if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
- }
-
- if (vocab_only) {
- LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
- return;
- }
-
- auto & ctx = model.ctx;
+ model.n_gpu_layers = n_gpu_layers;
size_t ctx_size;
size_t mmapped_size;
- ml->calc_sizes(ctx_size, mmapped_size);
+ ml.calc_sizes(ctx_size, mmapped_size);
LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
@@ -1480,7 +1808,7 @@ static void llama_model_load_internal(
struct ggml_init_params params = {
/*.mem_size =*/ model.buf.size,
/*.mem_buffer =*/ model.buf.data,
- /*.no_alloc =*/ ml->use_mmap,
+ /*.no_alloc =*/ ml.use_mmap,
};
model.ctx = ggml_init(params);
@@ -1509,75 +1837,146 @@ static void llama_model_load_internal(
// prepare memory for the weights
size_t vram_weights = 0;
{
- const uint32_t n_embd = hparams.n_embd;
- const uint32_t n_embd_gqa = hparams.n_embd_gqa();
- const uint32_t n_layer = hparams.n_layer;
- const uint32_t n_vocab = hparams.n_vocab;
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+ const int64_t n_layer = hparams.n_layer;
+ const int64_t n_vocab = hparams.n_vocab;
- model.tok_embeddings = ml->create_tensor(ctx, TN_TOKEN_EMBD, {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ const auto tn = LLM_TN(model.arch);
- // "output" tensor
- {
- ggml_backend backend_norm;
- ggml_backend backend_output;
- if (n_gpu_layers > int(n_layer)) { // NOLINT
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
+ switch (model.arch) {
+ case LLM_ARCH_LLAMA:
+ {
+ model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend backend_norm;
+ ggml_backend backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
#ifndef _WIN32
- backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#else
- backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#endif // _WIN32
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
- } else {
- backend_norm = GGML_BACKEND_CPU;
- backend_output = GGML_BACKEND_CPU;
- }
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
- model.norm = ml->create_tensor(ctx, TN_OUTPUT_NORM, {n_embd}, backend_norm);
- model.output = ml->create_tensor(ctx, TN_OUTPUT, {n_embd, n_vocab}, backend_output);
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.norm);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
- }
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
- const uint32_t n_ff = hparams.n_ff;
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
- const int i_gpu_start = n_layer - n_gpu_layers;
+ const uint32_t n_ff = hparams.n_ff;
- model.layers.resize(n_layer);
- for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const int i_gpu_start = n_layer - n_gpu_layers;
- auto & layer = model.layers[i];
- layer.attention_norm = ml->create_tensor(ctx, format(TN_ATTN_NORM, i), {n_embd}, backend);
+ model.layers.resize(n_layer);
- layer.wq = ml->create_tensor(ctx, format(TN_ATTN_Q, i), {n_embd, n_embd}, backend_split);
- layer.wk = ml->create_tensor(ctx, format(TN_ATTN_K, i), {n_embd, n_embd_gqa}, backend_split);
- layer.wv = ml->create_tensor(ctx, format(TN_ATTN_V, i), {n_embd, n_embd_gqa}, backend_split);
- layer.wo = ml->create_tensor(ctx, format(TN_ATTN_OUTPUT, i), {n_embd, n_embd}, backend_split);
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
- layer.ffn_norm = ml->create_tensor(ctx, format(TN_FFN_NORM, i), {n_embd}, backend);
+ auto & layer = model.layers[i];
- layer.w1 = ml->create_tensor(ctx, format(TN_FFN_GATE, i), {n_embd, n_ff}, backend_split);
- layer.w2 = ml->create_tensor(ctx, format(TN_FFN_DOWN, i), { n_ff, n_embd}, backend_split);
- layer.w3 = ml->create_tensor(ctx, format(TN_FFN_UP, i), {n_embd, n_ff}, backend_split);
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
- if (backend == GGML_BACKEND_GPU) {
- vram_weights +=
- ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
- ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
- ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
- }
- }
+ layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.w1 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
+ ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
+ ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
+ }
+ }
+ } break;
+ case LLM_ARCH_FALCON:
+ {
+ // TODO: CPU-only for now
+
+ model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend backend_norm;
+ ggml_backend backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
+
+ if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
+ layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend);
+ layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend);
+ }
+
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+
+ layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ }
+ } break;
+ default:
+ throw std::runtime_error("unknown architecture");
+ };
}
- ml->done_getting_tensors();
+ ml.done_getting_tensors();
// print memory requirements
{
@@ -1589,8 +1988,7 @@ static void llama_model_load_internal(
mmapped_size - vram_weights; // weights in VRAM not in memory
// this is the memory required by one llama_state
- const size_t mem_required_state =
- scale*hparams.kv_size();
+ const size_t mem_required_state = scale*hparams.kv_size();
LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
@@ -1640,8 +2038,8 @@ static void llama_model_load_internal(
}
// populate `tensors_by_name`
- for (int i = 0; i < ml->n_tensors; ++i) {
- struct ggml_tensor * cur = ggml_get_tensor(ctx, ml->get_tensor_name(i));
+ for (int i = 0; i < ml.n_tensors; ++i) {
+ struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i));
model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
}
@@ -1652,13 +2050,13 @@ static void llama_model_load_internal(
}
#endif
- ml->load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);
+ ml.load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);
if (progress_callback) {
progress_callback(1.0f, progress_callback_user_data);
}
- model.mapping = std::move(ml->mapping);
+ model.mapping = std::move(ml.mapping);
// loading time will be recalculate after the first eval, so
// we take page faults deferred by mmap() into consideration
@@ -1668,7 +2066,6 @@ static void llama_model_load_internal(
static bool llama_model_load(
const std::string & fname,
llama_model & model,
- llama_vocab & vocab,
int n_ctx,
int n_batch,
int n_gpu_layers,
@@ -1685,17 +2082,36 @@ static bool llama_model_load(
llama_progress_callback progress_callback,
void *progress_callback_user_data) {
try {
- llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gpu_layers,
- main_gpu, tensor_split, mul_mat_q, rope_freq_base, rope_freq_scale, low_vram, memory_type,
- use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
- return true;
+ std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));
+
+ llm_load_arch (*ml, model);
+ llm_load_hparams(*ml, model, n_ctx, rope_freq_base, rope_freq_scale);
+ llm_load_vocab (*ml, model);
+
+ llm_load_print_meta(*ml, model);
+
+ if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
+ throw std::runtime_error("vocab size mismatch");
+ }
+
+ if (vocab_only) {
+ LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
+ return true;
+ }
+
+ llm_load_tensors(
+ *ml, model, n_batch, n_gpu_layers,
+ main_gpu, tensor_split, mul_mat_q, low_vram, memory_type,
+ use_mlock, progress_callback, progress_callback_user_data);
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
return false;
}
+
+ return true;
}
-static struct ggml_cgraph * llama_build_graph(
+static struct ggml_cgraph * llm_build_llama(
llama_context & lctx,
const llama_token * tokens,
const float * embd,
@@ -1729,8 +2145,7 @@ static struct ggml_cgraph * llama_build_graph(
const int n_gpu_layers = model.n_gpu_layers;
- auto & mem_per_token = lctx.mem_per_token;
- auto & buf_compute = lctx.buf_compute;
+ auto & buf_compute = lctx.buf_compute;
struct ggml_init_params params = {
/*.mem_size =*/ buf_compute.size,
@@ -1820,8 +2235,8 @@ static struct ggml_cgraph * llama_build_graph(
offload_func(cur);
ggml_set_name(cur, "rms_norm_0");
- // cur = cur*attention_norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.layers[il].attention_norm);
+ // cur = cur*attn_norm(broadcasted)
+ cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm);
offload_func(cur);
ggml_set_name(cur, "attention_norm_0");
}
@@ -1872,10 +2287,7 @@ static struct ggml_cgraph * llama_build_graph(
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
}
- struct ggml_tensor * Q =
- ggml_permute(ctx0,
- Qcur,
- 0, 2, 1, 3);
+ struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
offload_func_kq(Q);
ggml_set_name(Q, "Q");
@@ -2005,14 +2417,16 @@ static struct ggml_cgraph * llama_build_graph(
inpL = cur;
}
+ cur = inpL;
+
// norm
{
- cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps);
+ cur = ggml_rms_norm(ctx0, cur, norm_rms_eps);
offload_func_nr(cur);
ggml_set_name(cur, "rms_norm_2");
// cur = cur*norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.norm);
+ cur = ggml_mul(ctx0, cur, model.output_norm);
// offload_func_nr(cur); // TODO CPU + GPU mirrored backend
ggml_set_name(cur, "result_norm");
}
@@ -2021,20 +2435,344 @@ static struct ggml_cgraph * llama_build_graph(
cur = ggml_mul_mat(ctx0, model.output, cur);
ggml_set_name(cur, "result_output");
- // logits -> probs
- //cur = ggml_soft_max_inplace(ctx0, cur);
-
ggml_build_forward_expand(gf, cur);
- if (mem_per_token == 0) {
- mem_per_token = ggml_used_mem(ctx0)/N;
- }
-
ggml_free(ctx0);
return gf;
}
+static struct ggml_cgraph * llm_build_falcon(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past) {
+
+ GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
+
+ const int N = n_tokens;
+
+ const auto & model = lctx.model;
+ const auto & hparams = model.hparams;
+
+ const auto & kv_self = lctx.kv_self;
+
+ GGML_ASSERT(!!kv_self.ctx);
+
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_layer = hparams.n_layer;
+ const int64_t n_ctx = hparams.n_ctx;
+ const int64_t n_head = hparams.n_head;
+ const int64_t n_head_kv = hparams.n_head_kv;
+ const int64_t n_embd_head = hparams.n_embd_head();
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+ const float freq_base = hparams.rope_freq_base;
+ const float freq_scale = hparams.rope_freq_scale;
+ const float norm_eps = hparams.f_norm_eps;
+
+ const int n_gpu_layers = model.n_gpu_layers;
+
+ auto & buf_compute = lctx.buf_compute;
+
+ struct ggml_init_params params = {
+ /*.mem_size =*/ buf_compute.size,
+ /*.mem_buffer =*/ buf_compute.data,
+ /*.no_alloc =*/ false,
+ };
+
+ params.no_alloc = true;
+
+ struct ggml_context * ctx0 = ggml_init(params);
+
+ ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ if (tokens) {
+ struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+
+ ggml_allocr_alloc(lctx.alloc, inp_tokens);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+ }
+ ggml_set_name(inp_tokens, "inp_tokens");
+
+ inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
+ } else {
+#ifdef GGML_USE_MPI
+ GGML_ASSERT(false && "not implemented");
+#endif
+
+ inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+
+ ggml_allocr_alloc(lctx.alloc, inpL);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+ }
+ }
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+ (void) i_gpu_start;
+
+ // offload functions set the tensor output backend to GPU
+ // tensors are GPU-accelerated if any input or the output has been offloaded
+ //
+ // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
+ // in that case ggml_cuda_assign_buffers has no effect
+ offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
+ offload_func_t offload_func_kq = llama_nop;
+ offload_func_t offload_func_v = llama_nop;
+
+#ifdef GGML_USE_CUBLAS
+ if (n_gpu_layers > n_layer) {
+ offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 1) {
+ offload_func_v = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 2) {
+ offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ ggml_allocr_alloc(lctx.alloc, KQ_scale);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+ }
+ ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
+
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * attn_norm;
+
+ offload_func_t offload_func = llama_nop;
+
+#ifdef GGML_USE_CUBLAS
+ if (il >= i_gpu_start) {
+ offload_func = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
+ // self-attention
+ // TODO: refactor into common function (shared with LLaMA)
+ {
+ attn_norm = ggml_norm(ctx0, inpL, norm_eps);
+ offload_func(attn_norm);
+
+ attn_norm = ggml_add(ctx0,
+ ggml_mul(ctx0, attn_norm, model.layers[il].attn_norm),
+ model.layers[il].attn_norm_b);
+ offload_func(attn_norm->src[0]);
+ offload_func(attn_norm);
+
+ if (model.layers[il].attn_norm_2) { // Falcon-40B
+ cur = ggml_norm(ctx0, inpL, norm_eps);
+ offload_func(cur);
+
+ cur = ggml_add(ctx0,
+ ggml_mul(ctx0, cur, model.layers[il].attn_norm_2),
+ model.layers[il].attn_norm_2_b);
+ offload_func(cur->src[0]);
+ offload_func(cur);
+ } else { // Falcon 7B
+ cur = attn_norm;
+ }
+
+ // compute QKV
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ offload_func_kq(cur);
+
+ // Note that the strides for Kcur, Vcur are set up so that the
+ // resulting views are misaligned with the tensor's storage
+ // (by applying the K/V offset we shift the tensor's original
+ // view to stick out behind the viewed QKV tensor's allocated
+ // memory, so to say). This is ok because no actual accesses
+ // happen to that out-of-range memory, but it can require some
+ // trickery when trying to accurately dump these views for
+ // debugging.
+
+ const size_t wsize = ggml_type_size(cur->type);
+
+ struct ggml_tensor * tmpq = ggml_view_3d(
+ ctx0, cur, n_embd_head, n_head, N,
+ wsize * n_embd_head,
+ wsize * n_embd_head * (n_head + 2 * n_head_kv),
+ 0);
+ offload_func_kq(tmpq);
+
+ struct ggml_tensor * tmpk = ggml_view_3d(
+ ctx0, cur, n_embd_head, n_head_kv, N,
+ wsize * n_embd_head,
+ wsize * n_embd_head * (n_head + 2 * n_head_kv),
+ wsize * n_embd_head * n_head);
+ offload_func_kq(tmpk);
+
+ struct ggml_tensor * tmpv = ggml_view_3d(
+ ctx0, cur, n_embd_head, n_head_kv, N,
+ wsize * n_embd_head,
+ wsize * n_embd_head * (n_head + 2 * n_head_kv),
+ wsize * n_embd_head * (n_head + n_head_kv));
+ offload_func_v(tmpv);
+
+ // using mode = 2 for neox mode
+ struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, tmpq, n_past, n_embd_head, 2, 0, freq_base, freq_scale);
+ offload_func_kq(Qcur);
+ struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, tmpk, n_past, n_embd_head, 2, 0, freq_base, freq_scale);
+ offload_func_kq(Kcur);
+
+ {
+ struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_cont(ctx0, tmpv), n_embd_gqa, N));
+ offload_func_v(Vcur);
+ offload_func_v(Vcur->src[0]->src[0]);
+ ggml_set_name(Vcur, "Vcur");
+
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
+ offload_func_kq(k);
+ ggml_set_name(k, "k");
+
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
+ ( n_ctx)*ggml_element_size(kv_self.v),
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
+ offload_func_v(v);
+
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
+ }
+
+ struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
+ offload_func_kq(Q);
+ ggml_set_name(Q, "Q");
+
+ struct ggml_tensor * K =
+ ggml_view_3d(ctx0, kv_self.k,
+ n_embd_head, n_past + N, n_head_kv,
+ ggml_element_size(kv_self.k)*n_embd_gqa,
+ ggml_element_size(kv_self.k)*n_embd_head,
+ ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
+ offload_func_kq(K);
+ ggml_set_name(K, "K");
+
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+ offload_func_kq(KQ);
+ ggml_set_name(KQ, "KQ");
+
+ struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
+ offload_func_kq(KQ_scaled);
+ ggml_set_name(KQ_scaled, "KQ_scaled");
+
+ struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
+ offload_func_kq(KQ_masked);
+ ggml_set_name(KQ_masked, "KQ_masked");
+
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
+ offload_func_v(KQ_soft_max);
+ ggml_set_name(KQ_soft_max, "KQ_soft_max");
+
+ struct ggml_tensor * V =
+ ggml_view_3d(ctx0, kv_self.v,
+ n_past + N, n_embd_head, n_head_kv,
+ ggml_element_size(kv_self.v)*n_ctx,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
+ offload_func_v(V);
+ ggml_set_name(V, "V");
+
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+ offload_func_v(KQV);
+ ggml_set_name(KQV, "KQV");
+
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+ offload_func_v(KQV_merged);
+ ggml_set_name(KQV_merged, "KQV_merged");
+
+ cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
+ offload_func_v(cur);
+ ggml_set_name(cur, "KQV_merged_contiguous");
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_wo");
+ }
+
+ struct ggml_tensor * attn_out = cur;
+
+ // feed forward
+ {
+ struct ggml_tensor * inpFF = attn_norm;
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].w3, inpFF);
+ offload_func(cur);
+
+ cur = ggml_gelu(ctx0, cur);
+ offload_func(cur);
+ cur = ggml_mul_mat(ctx0, model.layers[il].w2, cur);
+ offload_func(cur);
+ }
+
+ cur = ggml_add(ctx0, cur, attn_out);
+ offload_func(cur);
+ cur = ggml_add(ctx0, cur, inpL);
+ offload_func(cur);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ // norm
+ {
+ cur = ggml_norm(ctx0, cur, norm_eps);
+ offload_func_nr(cur);
+
+ cur = ggml_add(ctx0,
+ ggml_mul(ctx0, cur, model.output_norm),
+ model.output_norm_b);
+ ggml_set_name(cur, "result_norm");
+ }
+
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ ggml_set_name(cur, "result_output");
+
+ ggml_build_forward_expand(gf, cur);
+
+ ggml_free(ctx0);
+
+ return gf;
+}
+
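The fused wqkv views above rely on Falcon packing each token's row as n_head query heads followed by n_head_kv key heads and n_head_kv value heads. A tiny standalone sketch of that offset arithmetic, using assumed Falcon-7B-like values (71 query heads, 1 KV head, 64-dim heads, f16 storage) purely for illustration:

#include <cstdio>

int main() {
    // assumed example values (roughly Falcon-7B), not read from any model file
    const int    n_head      = 71;
    const int    n_head_kv   = 1;
    const int    n_embd_head = 64;
    const size_t wsize       = 2; // bytes per f16 element

    // one token's row in the wqkv output holds (n_head + 2*n_head_kv) heads
    const size_t row_stride = wsize * n_embd_head * (n_head + 2*n_head_kv);

    // byte offsets used when carving the Q/K/V views out of that row
    const size_t q_offs = 0;
    const size_t k_offs = wsize * n_embd_head *  n_head;
    const size_t v_offs = wsize * n_embd_head * (n_head + n_head_kv);

    printf("row stride = %zu bytes, Q at %zu, K at %zu, V at %zu\n",
           row_stride, q_offs, k_offs, v_offs);
    return 0;
}
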
+static struct ggml_cgraph * llama_build_graph(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past) {
+ const auto & model = lctx.model;
+
+ struct ggml_cgraph * result = NULL;
+
+ switch (model.arch) {
+ case LLM_ARCH_LLAMA:
+ {
+ result = llm_build_llama(lctx, tokens, embd, n_tokens, n_past);
+ } break;
+ case LLM_ARCH_FALCON:
+ {
+ result = llm_build_falcon(lctx, tokens, embd, n_tokens, n_past);
+ } break;
+ default:
+ GGML_ASSERT(false);
+ };
+
+ return result;
+}
+
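For orientation, this dispatcher is the single place where a new architecture would later be wired in: one extra case selecting its graph builder. A toy, self-contained sketch of the same dispatch shape (the enum and function here are stand-ins, not the patch's real types):

#include <cstdio>

// stand-ins for the real types, only to show the dispatch shape
enum llm_arch_example { EX_ARCH_LLAMA, EX_ARCH_FALCON, EX_ARCH_UNKNOWN };

static const char * build_graph_for(llm_arch_example arch) {
    switch (arch) {
        case EX_ARCH_LLAMA:  return "llm_build_llama";
        case EX_ARCH_FALCON: return "llm_build_falcon";
        default:             return nullptr; // the real code asserts here
    }
}

int main() {
    printf("%s\n", build_graph_for(EX_ARCH_FALCON)); // -> llm_build_falcon
    return 0;
}
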
// evaluate the transformer
//
// - lctx: llama context
@@ -2077,8 +2815,8 @@ static bool llama_eval_internal(
GGML_ASSERT(!!kv_self.ctx);
- const int64_t n_embd = hparams.n_embd;
- const int64_t n_vocab = hparams.n_vocab;
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_vocab = hparams.n_vocab;
ggml_allocr_reset(lctx.alloc);
@@ -2108,11 +2846,11 @@ static bool llama_eval_internal(
// otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
- struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
- GGML_ASSERT(strcmp(res->name, "result_output") == 0);
- GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+ GGML_ASSERT(strcmp(res->name, "result_output") == 0);
+ GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
#if GGML_USE_MPI
const int64_t n_layer = hparams.n_layer;
@@ -2264,20 +3002,11 @@ static std::string llama_escape_whitespace(const std::string& text) {
return result;
}
-static std::string llama_unescape_whitespace(const std::string& word) {
- if (word.length() >= 3 && word.substr(0, 3) == "\xe2\x96\x81") {
- return std::string(" ") + word.substr(3);
- }
- return word;
+static void llama_unescape_whitespace(std::string & word) {
+ replace_all(word, "\xe2\x96\x81", " ");
}
-static size_t utf8_len(char src) {
- const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
- uint8_t highbits = static_cast<uint8_t>(src) >> 4;
- return lookup[highbits];
-}
-
-struct llama_sp_symbol {
+struct llm_symbol {
using index = int;
index prev;
index next;
@@ -2285,33 +3014,35 @@ struct llama_sp_symbol {
size_t n;
};
-static_assert(std::is_trivially_copyable<llama_sp_symbol>::value, "llama_sp_symbol is not trivially copyable");
+static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
-struct llama_sp_bigram {
+// SPM tokenizer
+// original implementation:
+// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
+
+struct llm_bigram_spm {
struct comparator {
- bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
+ bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
return (l.score < r.score) || (l.score == r.score && l.left > r.left);
}
};
- using queue_storage = std::vector<llama_sp_bigram>;
- using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
- llama_sp_symbol::index left;
- llama_sp_symbol::index right;
+ using queue_storage = std::vector<llm_bigram_spm>;
+ using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
+ llm_symbol::index left;
+ llm_symbol::index right;
float score;
size_t size;
};
-// original implementation:
-// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
-struct llama_tokenizer {
- llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}
+struct llm_tokenizer_spm {
+ llm_tokenizer_spm(const llama_vocab & vocab): vocab(vocab) {}
void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
// split string into utf8 chars
int index = 0;
size_t offs = 0;
while (offs < text.size()) {
- llama_sp_symbol sym;
+ llm_symbol sym;
size_t len = utf8_len(text[offs]);
GGML_ASSERT(offs + len <= text.size());
sym.text = text.c_str() + offs;
@@ -2320,21 +3051,21 @@ struct llama_tokenizer {
sym.prev = index - 1;
sym.next = offs == text.size() ? -1 : index + 1;
index++;
- symbols_.emplace_back(sym);
+ symbols.emplace_back(sym);
}
// seed the work queue with all possible 2-character tokens.
- for (size_t i = 1; i < symbols_.size(); ++i) {
+ for (size_t i = 1; i < symbols.size(); ++i) {
try_add_bigram(i - 1, i);
}
// keep substituting the highest frequency pairs for as long as we can.
- while (!work_queue_.empty()) {
- auto bigram = work_queue_.top();
- work_queue_.pop();
+ while (!work_queue.empty()) {
+ auto bigram = work_queue.top();
+ work_queue.pop();
- auto & left_sym = symbols_[bigram.left];
- auto & right_sym = symbols_[bigram.right];
+ auto & left_sym = symbols[bigram.left];
+ auto & right_sym = symbols[bigram.right];
// if one of the symbols already got merged, skip it.
if (left_sym.n == 0 || right_sym.n == 0 ||
@@ -2351,7 +3082,7 @@ struct llama_tokenizer {
// remove the right sym from the chain
left_sym.next = right_sym.next;
if (right_sym.next >= 0) {
- symbols_[right_sym.next].prev = bigram.left;
+ symbols[right_sym.next].prev = bigram.left;
}
// find more substitutions
@@ -2359,19 +3090,19 @@ struct llama_tokenizer {
try_add_bigram(bigram.left, left_sym.next);
}
- for (int i = 0; i != -1; i = symbols_[i].next) {
- auto & symbol = symbols_[i];
+ for (int i = 0; i != -1; i = symbols[i].next) {
+ auto & symbol = symbols[i];
resegment(symbol, output);
}
}
private:
- void resegment(llama_sp_symbol &symbol, std::vector<llama_vocab::id> &output) {
+ void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
auto text = std::string(symbol.text, symbol.n);
- auto token = vocab_.token_to_id.find(text);
+ auto token = vocab.token_to_id.find(text);
// Do we need to support is_unused?
- if (token != vocab_.token_to_id.end()) {
+ if (token != vocab.token_to_id.end()) {
output.push_back((*token).second);
return;
}
@@ -2381,14 +3112,14 @@ private:
if (p == rev_merge.end()) {
// output any symbols that did not form tokens as bytes.
for (int j = 0; j < (int)symbol.n; ++j) {
- llama_vocab::id token_id = llama_byte_to_token(vocab_, symbol.text[j]);
+ llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
output.push_back(token_id);
}
return;
}
- resegment(symbols_[p->second.first], output);
- resegment(symbols_[p->second.second], output);
+ resegment(symbols[p->second.first], output);
+ resegment(symbols[p->second.second], output);
}
void try_add_bigram(int left, int right) {
@@ -2396,56 +3127,261 @@ private:
return;
}
- const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
- auto token = vocab_.token_to_id.find(text);
+ const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
+ auto token = vocab.token_to_id.find(text);
- if (token == vocab_.token_to_id.end()) {
+ if (token == vocab.token_to_id.end()) {
return;
}
- if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
+ if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
return;
}
- const auto &tok_data = vocab_.id_to_token[(*token).second];
+ const auto & tok_data = vocab.id_to_token[(*token).second];
- llama_sp_bigram bigram;
- bigram.left = left;
+ llm_bigram_spm bigram;
+ bigram.left = left;
bigram.right = right;
bigram.score = tok_data.score;
- bigram.size = text.size();
- work_queue_.push(bigram);
+ bigram.size = text.size();
+
+ work_queue.push(bigram);
// Do we need to support is_unused?
rev_merge[text] = std::make_pair(left, right);
}
- const llama_vocab & vocab_;
- std::vector<llama_sp_symbol> symbols_;
- llama_sp_bigram::queue work_queue_;
- std::map<std::string, std::pair<int, int> > rev_merge;
+ const llama_vocab & vocab;
+
+ std::vector<llm_symbol> symbols;
+ llm_bigram_spm::queue work_queue;
+
+ std::map<std::string, std::pair<int, int>> rev_merge;
+};
+
+// BPE tokenizer
+// adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
+// tried to simplify unicode stuff, so most likely does not work 100% correctly!
+
+// TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
+
+struct llm_bigram_bpe {
+ struct comparator {
+ bool operator()(llm_bigram_bpe & l, llm_bigram_bpe & r) {
+ return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
+ }
+ };
+
+ using queue_storage = std::vector<llm_bigram_bpe>;
+ using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
+ llm_symbol::index left;
+ llm_symbol::index right;
+ std::string text;
+ int rank;
+ size_t size;
+};
+
+struct llm_tokenizer_bpe {
+ llm_tokenizer_bpe(const llama_vocab & vocab, bool g2ws): vocab(vocab) { flag_g2ws = g2ws; }
+
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
+ int final_prev_index = -1;
+ auto word_collection = bpe_gpt2_preprocess(text);
+
+ symbols_final.clear();
+
+ for (auto & word : word_collection) {
+ work_queue = llm_bigram_bpe::queue();
+ symbols.clear();
+
+ int index = 0;
+ size_t offset = 0;
+
+ while (offset < word.size()) {
+ llm_symbol sym;
+ size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
+ sym.text = word.c_str() + offset;
+ sym.n = char_len;
+ offset += sym.n;
+ sym.prev = index - 1;
+ sym.next = offset == word.size() ? -1 : index + 1;
+ index++;
+ symbols.emplace_back(sym);
+ }
+ for (size_t i = 1; i < symbols.size(); ++i) {
+ add_new_bigram(i - 1, i);
+ }
+
+ // build token(s)
+ while (!work_queue.empty()) {
+ auto bigram = work_queue.top();
+ work_queue.pop();
+
+ auto & left_symbol = symbols[bigram.left];
+ auto & right_symbol = symbols[bigram.right];
+
+ if (left_symbol.n == 0 || right_symbol.n == 0) {
+ continue;
+ }
+ std::string left_token = std::string(left_symbol.text, left_symbol.n);
+ std::string right_token = std::string(right_symbol.text, right_symbol.n);
+ if (left_token + right_token != bigram.text) {
+ continue; // Skip this bigram if it's outdated
+ }
+
+ // merge the right sym into the left one
+ left_symbol.n += right_symbol.n;
+ right_symbol.n = 0;
+
+ // remove the right sym from the chain
+ left_symbol.next = right_symbol.next;
+ if (right_symbol.next >= 0) {
+ symbols[right_symbol.next].prev = bigram.left;
+ }
+
+ add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
+ add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
+ }
+
+ // add the finished tokens to the final list keeping correct order for next and prev
+ for (auto & sym : symbols) {
+ if (sym.n > 0) {
+ sym.prev = final_prev_index;
+ sym.next = -1;
+ if (final_prev_index != -1) {
+ symbols_final[final_prev_index].next = symbols_final.size();
+ }
+ symbols_final.emplace_back(sym);
+ final_prev_index = symbols_final.size() - 1;
+ }
+ }
+ }
+
+ symbols = symbols_final;
+
+ if (!symbols.empty()) {
+ for (int i = 0; i != -1; i = symbols[i].next) {
+ auto & symbol = symbols[i];
+ if (symbol.n == 0) {
+ continue;
+ }
+
+ const std::string str = std::string(symbol.text, symbol.n);
+ const auto token = vocab.token_to_id.find(str);
+
+ if (token == vocab.token_to_id.end()) {
+ for (auto j = str.begin(); j != str.end(); ++j) {
+ std::string byte_str(1, *j);
+ auto token_multibyte = vocab.token_to_id.find(byte_str);
+ if (token_multibyte == vocab.token_to_id.end()) {
+ fprintf(stderr,"ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
+ }
+ output.push_back((*token_multibyte).second);
+ }
+ } else {
+ output.push_back((*token).second);
+ }
+ }
+ }
+ }
+
+private:
+ void add_new_bigram(int left, int right) {
+ if (left == -1 || right == -1) {
+ return;
+ }
+
+ std::string left_token = std::string(symbols[left].text, symbols[left].n);
+ std::string right_token = std::string(symbols[right].text, symbols[right].n);
+
+ int rank_found = -1;
+
+ rank_found = vocab.find_bpe_rank(left_token, right_token);
+
+ if (rank_found < 0) {
+ return;
+ }
+
+ llm_bigram_bpe bigram;
+
+ bigram.left = left;
+ bigram.right = right;
+ bigram.text = left_token + right_token;
+ bigram.size = left_token.size() + right_token.size();
+ bigram.rank = rank_found;
+
+ work_queue.push(bigram);
+ }
+
+ // probably not 100% correct
+ // TODO: this is quite slow - how to make it more efficient?
+ static std::vector<std::string> bpe_gpt2_preprocess(std::string text) {
+ std::vector<std::string> words;
+
+ // ref: https://github.com/openai/gpt-2/blob/a74da5d99abaaba920de8131d64da2862a8f213b/src/encoder.py#L53
+ const std::string pattern = R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)";
+ const std::regex re(pattern);
+ std::smatch m;
+
+ while (std::regex_search(text, m, re)) {
+ for (auto x : m) {
+ words.push_back(x);
+ }
+ text = m.suffix();
+ }
+
+ return words;
+ }
+
+ bool flag_g2ws = false;
+
+ const llama_vocab & vocab;
+
+ std::vector<llm_symbol> symbols;
+ std::vector<llm_symbol> symbols_final;
+
+ llm_bigram_bpe::queue work_queue;
};
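The merge loop above is rank-driven: the lowest-rank (earliest-learned) adjacent pair is merged first, and new neighbours are re-queued after every merge. A minimal standalone sketch of that idea with a made-up two-entry merge table (not the model's real merges):

#include <climits>
#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
    // made-up merge ranks purely for illustration; lower rank merges first
    std::map<std::pair<std::string, std::string>, int> ranks = {
        {{"l", "o"},  0},
        {{"lo", "w"}, 1},
    };

    std::vector<std::string> syms = {"l", "o", "w"};

    while (true) {
        int best      = -1;
        int best_rank = INT_MAX;
        for (size_t i = 0; i + 1 < syms.size(); ++i) {
            auto it = ranks.find({syms[i], syms[i + 1]});
            if (it != ranks.end() && it->second < best_rank) {
                best      = (int) i;
                best_rank = it->second;
            }
        }
        if (best < 0) {
            break; // no known pair left to merge
        }
        syms[best] += syms[best + 1];
        syms.erase(syms.begin() + best + 1);
    }

    for (const auto & s : syms) {
        printf("%s ", s.c_str()); // prints: low
    }
    printf("\n");
    return 0;
}
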
static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, const std::string & raw_text, bool bos, bool escape) {
- llama_tokenizer tokenizer(vocab);
std::vector<llama_vocab::id> output;
if (raw_text.empty()) {
return output;
}
- if (bos) {
- output.push_back(vocab.special_bos_id);
- }
+ switch (vocab.type) {
+ case LLAMA_VOCAB_TYPE_SPM:
+ {
+ llm_tokenizer_spm tokenizer(vocab);
- std::string text;
- if (escape) {
- text = llama_escape_whitespace(raw_text);
- } else {
- text = raw_text;
- }
+ if (bos) {
+ output.push_back(vocab.special_bos_id);
+ }
+
+ std::string text;
+ if (escape) {
+ text = llama_escape_whitespace(raw_text);
+ } else {
+ text = raw_text;
+ }
+
+ tokenizer.tokenize(text, output);
+ } break;
+ case LLAMA_VOCAB_TYPE_BPE:
+ {
+ llm_tokenizer_bpe tokenizer(vocab, escape);
+
+ if (bos && vocab.special_bos_id != -1) {
+ output.push_back(vocab.special_bos_id);
+ }
+
+ tokenizer.tokenize(raw_text, output);
+ } break;
+ };
- tokenizer.tokenize(text, output);
return output;
}
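For context on the SPM branch above: SentencePiece vocabularies store spaces as U+2581 ("\xe2\x96\x81"), so tokenization escapes whitespace first and detokenization undoes it via replace_all. A tiny standalone round-trip sketch (the helper here is local to the sketch, not the library's API):

#include <cstdio>
#include <string>

// minimal stand-in for the escape/unescape pair used around the SPM tokenizer
static void replace_all(std::string & s, const std::string & from, const std::string & to) {
    for (size_t pos = 0; (pos = s.find(from, pos)) != std::string::npos; pos += to.size()) {
        s.replace(pos, from.size(), to);
    }
}

int main() {
    std::string text = "Hello world";

    std::string escaped = text;
    replace_all(escaped, " ", "\xe2\x96\x81");   // " "  -> U+2581

    std::string restored = escaped;
    replace_all(restored, "\xe2\x96\x81", " ");  // U+2581 -> " "

    printf("escaped:  %s\nrestored: %s\n", escaped.c_str(), restored.c_str());
    return 0;
}
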
@@ -3133,7 +4069,7 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c
if (!allow_eos) {
candidates->data[i].logit = -INFINITY;
}
- } else if (text.empty()) {
+ } else if (text.empty() || text[0] == 0) {
candidates->data[i].logit = -INFINITY;
} else {
candidates_decoded.push_back(decode_utf8(text.c_str(), grammar->partial_utf8));
@@ -3449,13 +4385,13 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
nthread = std::thread::hardware_concurrency();
}
- std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false));
+ std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname_inp, /*use_mmap*/ false));
const size_t align = GGUF_DEFAULT_ALIGNMENT;
struct gguf_context * ctx_out = gguf_init_empty();
// copy the KV pairs from the input file
- gguf_set_kv (ctx_out, model_loader->ctx_gguf);
+ gguf_set_kv (ctx_out, ml->ctx_gguf);
gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
gguf_set_val_u32(ctx_out, "general.file_type", ftype);
@@ -3463,8 +4399,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
int n_attention_wv = 0;
int n_feed_forward_w2 = 0;
- for (int i = 0; i < model_loader->n_tensors; ++i) {
- struct ggml_tensor * meta = model_loader->get_tensor_meta(i);
+ for (int i = 0; i < ml->n_tensors; ++i) {
+ struct ggml_tensor * meta = ml->get_tensor_meta(i);
const std::string name = ggml_get_name(meta);
@@ -3498,8 +4434,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
std::vector<uint8_t> work;
// populate the original tensors so we get an initial meta data
- for (int i = 0; i < model_loader->n_tensors; ++i) {
- struct ggml_tensor * meta = model_loader->get_tensor_meta(i);
+ for (int i = 0; i < ml->n_tensors; ++i) {
+ struct ggml_tensor * meta = ml->get_tensor_meta(i);
gguf_add_tensor(ctx_out, meta);
}
@@ -3512,17 +4448,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
// placeholder for the meta data
::zeros(fout, meta_size);
- for (int i = 0; i < model_loader->n_tensors; ++i) {
- struct ggml_tensor * tensor = model_loader->get_tensor_meta(i);
+ for (int i = 0; i < ml->n_tensors; ++i) {
+ struct ggml_tensor * tensor = ml->get_tensor_meta(i);
const std::string name = ggml_get_name(tensor);
read_data.resize(ggml_nbytes(tensor));
tensor->data = read_data.data();
- model_loader->load_data_for(tensor);
+ ml->load_data_for(tensor);
LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
- ++idx, model_loader->n_tensors,
+ ++idx, ml->n_tensors,
ggml_get_name(tensor),
llama_format_tensor_shape(tensor).c_str(),
ggml_type_name(tensor->type));
@@ -3548,7 +4484,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
new_type = quantized_type;
#ifdef GGML_USE_K_QUANTS
// TODO: avoid hardcoded tensor names - use the TN_* constants
- if (name == TN_OUTPUT) {
+ const auto tn = LLM_TN(ml->get_arch());
+
+ if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
int nx = tensor->ne[0];
int ny = tensor->ne[1];
if (nx % QK_K == 0 && ny % QK_K == 0) {
@@ -3600,10 +4538,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
}
}
if (convert_incompatible_tensor) {
- if (name == TN_OUTPUT) {
+ if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
- } else if (name == TN_TOKEN_EMBD) {
+ } else if (name == tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
} else {
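The tn(...) helper used in this hunk comes from the LLM_TN tables introduced earlier in the patch; conceptually it joins a per-architecture tensor base name with a suffix into a GGUF-style name. A rough stand-in, with the two example names assumed from the usual GGUF conventions rather than quoted from the tables:

#include <cstdio>
#include <string>

// illustrative stand-in for tn(...): base name + "." + suffix
static std::string tn(const std::string & base, const std::string & suffix) {
    return base + "." + suffix;
}

int main() {
    printf("%s\n", tn("output", "weight").c_str());     // compared against LLM_TENSOR_OUTPUT
    printf("%s\n", tn("token_embd", "weight").c_str()); // compared against LLM_TENSOR_TOKEN_EMBD
    return 0;
}
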
@@ -3785,28 +4723,28 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
}
// load base model
- std::unique_ptr<llama_model_loader> model_loader;
+ std::unique_ptr<llama_model_loader> ml;
ggml_context * base_ctx = NULL;
std::vector<uint8_t> base_buf;
if (path_base_model) {
LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
- model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
+ ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
size_t ctx_size;
size_t mmapped_size;
- model_loader->calc_sizes(ctx_size, mmapped_size);
+ ml->calc_sizes(ctx_size, mmapped_size);
base_buf.resize(ctx_size);
ggml_init_params base_params;
base_params.mem_size = base_buf.size();
base_params.mem_buffer = base_buf.data();
- base_params.no_alloc = model_loader->use_mmap;
+ base_params.no_alloc = ml->use_mmap;
base_ctx = ggml_init(base_params);
// maybe this should be in llama_model_loader
- if (model_loader->use_mmap) {
- model_loader->mapping.reset(new llama_mmap(&model_loader->file, /* prefetch */ 0, ggml_is_numa()));
+ if (ml->use_mmap) {
+ ml->mapping.reset(new llama_mmap(&ml->file, /* prefetch */ 0, ggml_is_numa()));
}
}
@@ -3910,18 +4848,19 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
#endif // GGML_USE_CUBLAS
ggml_tensor * base_t;
- if (model_loader) {
- struct gguf_context * ctx_gguf = model_loader->ctx_gguf;
+ if (ml) {
+ struct gguf_context * ctx_gguf = ml->ctx_gguf;
// load from base model
if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) {
+ // TODO: throw
LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
return 1;
}
// TODO: not tested!! maybe not working!
- base_t = model_loader->create_tensor(base_ctx, base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
- model_loader->load_data_for(base_t);
+ base_t = ml->create_tensor(base_ctx, base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
+ ml->load_data_for(base_t);
} else {
base_t = dest_t;
}
@@ -4096,7 +5035,23 @@ struct llama_model * llama_load_model_from_file(
ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
- if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers,
+ unsigned cur_percentage = 0;
+ if (params.progress_callback == NULL) {
+ params.progress_callback_user_data = &cur_percentage;
+ params.progress_callback = [](float progress, void * ctx) {
+ unsigned * cur_percentage_p = (unsigned *) ctx;
+ unsigned percentage = (unsigned) (100 * progress);
+ while (percentage > *cur_percentage_p) {
+ *cur_percentage_p = percentage;
+ LLAMA_LOG_INFO(".");
+ if (percentage >= 100) {
+ LLAMA_LOG_INFO("\n");
+ }
+ }
+ };
+ }
+
+ if (!llama_model_load(path_model, *model, params.n_ctx, params.n_batch, params.n_gpu_layers,
params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,
params.low_vram, memory_type, params.use_mmap, params.use_mlock, params.vocab_only,
params.progress_callback, params.progress_callback_user_data)) {
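Because the dot-printing default above is only installed when no callback is supplied, callers can still pass their own through llama_context_params. A small sketch against the public API in llama.h (the model path and print format are placeholders, not values from this patch):

#include <cstdio>
#include "llama.h"

int main() {
    struct llama_context_params params = llama_context_default_params();

    // captureless lambda converts to the C-style llama_progress_callback pointer
    params.progress_callback = [](float progress, void * ctx) {
        (void) ctx;
        fprintf(stderr, "\rloading: %3d%%", (int) (100 * progress));
    };
    params.progress_callback_user_data = NULL;

    // placeholder path, for illustration only
    struct llama_model * model = llama_load_model_from_file("models/7B/ggml-model-q4_0.gguf", params);
    if (model) {
        llama_free_model(model);
    }
    return 0;
}
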
@@ -4126,22 +5081,6 @@ struct llama_context * llama_new_context_with_model(
params.seed = time(NULL);
}
- unsigned cur_percentage = 0;
- if (params.progress_callback == NULL) {
- params.progress_callback_user_data = &cur_percentage;
- params.progress_callback = [](float progress, void * ctx) {
- unsigned * cur_percentage_p = (unsigned *) ctx;
- unsigned percentage = (unsigned) (100 * progress);
- while (percentage > *cur_percentage_p) {
- *cur_percentage_p = percentage;
- LLAMA_LOG_INFO(".");
- if (percentage >= 100) {
- LLAMA_LOG_INFO("\n");
- }
- }
- };
- }
-
ctx->rng = std::mt19937(params.seed);
ctx->logits_all = params.logits_all;
@@ -4279,13 +5218,14 @@ struct llama_context * llama_new_context_with_model(
struct llama_context * llama_init_from_file(
const char * path_model,
struct llama_context_params params) {
-
struct llama_model * model = llama_load_model_from_file(path_model, params);
if (!model) {
return nullptr;
}
+
struct llama_context * ctx = llama_new_context_with_model(model, params);
ctx->model_owner = true;
+
return ctx;
}
@@ -4305,6 +5245,10 @@ int llama_n_embd(const struct llama_context * ctx) {
return ctx->model.hparams.n_embd;
}
+enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx) {
+ return ctx->model.vocab.type;
+}
+
int llama_model_n_vocab(const struct llama_model * model) {
return model->vocab.id_to_token.size();
}
@@ -4318,7 +5262,10 @@ int llama_model_n_embd(const struct llama_model * model) {
}
int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size) {
- return snprintf(buf, buf_size, "LLaMA %s %s", llama_model_type_name(model->type), llama_model_ftype_name(model->ftype).c_str());
+ return snprintf(buf, buf_size, "%s %s %s",
+ model->name.c_str(),
+ llama_model_type_name(model->type),
+ llama_model_ftype_name(model->ftype).c_str());
}
int llama_model_quantize(
@@ -4839,26 +5786,6 @@ int llama_tokenize(
return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
}
-int llama_tokenize_bpe(
- struct llama_context * ctx,
- const char * text,
- llama_token * tokens,
- int n_max_tokens,
- bool add_bos) {
- auto res = llama_tokenize_internal(ctx->model.vocab, text, add_bos, false);
-
- if (n_max_tokens < (int) res.size()) {
- LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
- return -((int) res.size());
- }
-
- for (size_t i = 0; i < res.size(); i++) {
- tokens[i] = res[i];
- }
-
- return res.size();
-}
-
int llama_tokenize_with_model(
const struct llama_model * model,
const char * text,
@@ -4884,25 +5811,13 @@ int llama_token_to_str(const struct llama_context * ctx, llama_token token, char
return llama_token_to_str_with_model(&ctx->model, token, buf, length);
}
-int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token, char * buf, int length) {
- if (0 <= token && token < llama_model_n_vocab(&ctx->model)) {
- std::string result = ctx->model.vocab.id_to_token[token].text;
- if (length < (int) result.length()) {
- return -result.length();
- }
- memcpy(buf, result.c_str(), result.length());
- return result.length();
- }
- return 0;
-}
-
// does not write null-terminator to str
int llama_token_to_str_with_model(const struct llama_model * model, llama_token token, char * buf, int length) {
if (0 <= token && token < llama_model_n_vocab(model)) {
if (llama_is_normal_token(model->vocab, token)) {
std::string result = model->vocab.id_to_token[token].text;
if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_SPM) {
- result = llama_unescape_whitespace(result);
+ llama_unescape_whitespace(result);
}
if (length < (int) result.length()) {
return -result.length();
diff --git a/llama.h b/llama.h
index 7ce478d54..4e7638c04 100644
--- a/llama.h
+++ b/llama.h
@@ -247,6 +247,8 @@ extern "C" {
LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
LLAMA_API int llama_n_embd (const struct llama_context * ctx);
+ LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx);
+
LLAMA_API int llama_model_n_vocab(const struct llama_model * model);
LLAMA_API int llama_model_n_ctx (const struct llama_model * model);
LLAMA_API int llama_model_n_embd (const struct llama_model * model);
@@ -368,13 +370,6 @@ extern "C" {
int n_max_tokens,
bool add_bos);
- LLAMA_API int llama_tokenize_bpe(
- struct llama_context * ctx,
- const char * text,
- llama_token * tokens,
- int n_max_tokens,
- bool add_bos);
-
LLAMA_API int llama_tokenize_with_model(
const struct llama_model * model,
const char * text,
@@ -390,12 +385,6 @@ extern "C" {
char * buf,
int length);
- LLAMA_API int llama_token_to_str_bpe(
- const struct llama_context * ctx,
- llama_token token,
- char * buf,
- int length);
-
LLAMA_API int llama_token_to_str_with_model(
const struct llama_model * model,
llama_token token,
diff --git a/scripts/get-wikitext-2.sh b/scripts/get-wikitext-2.sh
old mode 100644
new mode 100755
diff --git a/scripts/perf-run-all.sh b/scripts/perf-run-all.sh
deleted file mode 100755
index 7dbfc7c20..000000000
--- a/scripts/perf-run-all.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-#
-# Measure the performance (time per token) of the various quantization techniques
-#
-
-QUANTIZE=0
-if [ "$1" != "" ]; then
- echo "Quantizing"
- QUANTIZE=1
-fi
-
-if [ "$QUANTIZE" != "0" ]; then
- #
- # quantize
- #
-
- # 7B
- time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q4_0.bin q4_0 2>&1 | tee ../qnt-7b-q4_0.txt
- time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q4_1.bin q4_1 2>&1 | tee ../qnt-7b-q4_1.txt
- time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q5_0.bin q5_0 2>&1 | tee ../qnt-7b-q5_0.txt
- time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q5_1.bin q5_1 2>&1 | tee ../qnt-7b-q5_1.txt
- time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q8_0.bin q8_0 2>&1 | tee ../qnt-7b-q8_0.txt
-
- # 13B
- time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q4_0.bin q4_0 2>&1 | tee ../qnt-13b-q4_0.txt
- time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q4_1.bin q4_1 2>&1 | tee ../qnt-13b-q4_1.txt
- time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q5_0.bin q5_0 2>&1 | tee ../qnt-13b-q5_0.txt
- time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q5_1.bin q5_1 2>&1 | tee ../qnt-13b-q5_1.txt
- time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q8_0.bin q8_0 2>&1 | tee ../qnt-13b-q8_0.txt
-fi
-
-#
-# perf
-# run each command twice
-#
-
-set -x
-
-# 7B - 4 threads
- ./bin/main -m ../models/7B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-7b-f16.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-7b-q4_0.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-7b-q4_1.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-7b-q5_0.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-7b-q5_1.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-7b-q8_0.txt | grep llama_print_timings
-
-# 7B - 8 threads
- ./bin/main -m ../models/7B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-7b-f16.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-7b-q4_0.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-7b-q4_1.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-7b-q5_0.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-7b-q5_1.txt | grep llama_print_timings
- ./bin/main -m ../models/7B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/7B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-7b-q8_0.txt | grep llama_print_timings
-
-# 13B - 4 threads
- ./bin/main -m ../models/13B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-13b-f16.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-13b-q4_0.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-13b-q4_1.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-13b-q5_0.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-13b-q5_1.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 4 2>&1 | tee ../perf-13b-q8_0.txt | grep llama_print_timings
-
-# 13B - 8 threads
- ./bin/main -m ../models/13B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-f16.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-13b-f16.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q4_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-13b-q4_0.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q4_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-13b-q4_1.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q5_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-13b-q5_0.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q5_1.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-13b-q5_1.txt | grep llama_print_timings
- ./bin/main -m ../models/13B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | grep "I believe"
-time ./bin/main -m ../models/13B/ggml-model-q8_0.bin -p "I believe the meaning of life is" --no-mmap -c 2048 --ignore-eos -s 1 -n 64 -t 8 2>&1 | tee ../perf-13b-q8_0.txt | grep llama_print_timings
diff --git a/scripts/ppl-run-all.sh b/scripts/ppl-run-all.sh
deleted file mode 100755
index c59e3075d..000000000
--- a/scripts/ppl-run-all.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-#
-# quantize
-#
-
-# 7B
-time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q4_0.bin q4_0 2>&1 | tee ../qnt-7b-q4_0.txt
-time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q4_1.bin q4_1 2>&1 | tee ../qnt-7b-q4_1.txt
-time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q5_0.bin q5_0 2>&1 | tee ../qnt-7b-q5_0.txt
-time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q5_1.bin q5_1 2>&1 | tee ../qnt-7b-q5_1.txt
-time ./bin/quantize ../models/7B/ggml-model-f16.bin ../models/7B/ggml-model-q8_0.bin q8_0 2>&1 | tee ../qnt-7b-q8_0.txt
-
-# 13B
-time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q4_0.bin q4_0 2>&1 | tee ../qnt-13b-q4_0.txt
-time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q4_1.bin q4_1 2>&1 | tee ../qnt-13b-q4_1.txt
-time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q5_0.bin q5_0 2>&1 | tee ../qnt-13b-q5_0.txt
-time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q5_1.bin q5_1 2>&1 | tee ../qnt-13b-q5_1.txt
-time ./bin/quantize ../models/13B/ggml-model-f16.bin ../models/13B/ggml-model-q8_0.bin q8_0 2>&1 | tee ../qnt-13b-q8_0.txt
-
-#
-# perplexity
-#
-
-# 7B
-time ./bin/perplexity -m ../models/7B/ggml-model-f16.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-7b-f16.txt
-time ./bin/perplexity -m ../models/7B/ggml-model-q4_0.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-7b-q4_0.txt
-time ./bin/perplexity -m ../models/7B/ggml-model-q4_1.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-7b-q4_1.txt
-time ./bin/perplexity -m ../models/7B/ggml-model-q5_0.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-7b-q5_0.txt
-time ./bin/perplexity -m ../models/7B/ggml-model-q5_1.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-7b-q5_1.txt
-time ./bin/perplexity -m ../models/7B/ggml-model-q8_0.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-7b-q8_0.txt
-
-# 13B
-time ./bin/perplexity -m ../models/13B/ggml-model-f16.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-13b-f16.txt
-time ./bin/perplexity -m ../models/13B/ggml-model-q4_0.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-13b-q4_0.txt
-time ./bin/perplexity -m ../models/13B/ggml-model-q4_1.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-13b-q4_1.txt
-time ./bin/perplexity -m ../models/13B/ggml-model-q5_0.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-13b-q5_0.txt
-time ./bin/perplexity -m ../models/13B/ggml-model-q5_1.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-13b-q5_1.txt
-time ./bin/perplexity -m ../models/13B/ggml-model-q8_0.bin -f ./wiki.test.raw --no-mmap -t 12 2>&1 | tee ../ppl-13b-q8_0.txt
diff --git a/scripts/qnt-all.sh b/scripts/qnt-all.sh
new file mode 100755
index 000000000..1b3d07da5
--- /dev/null
+++ b/scripts/qnt-all.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+qnt=(q8_0 q6_k q5_k q5_1 q5_0 q4_k q4_1 q4_0 q3_k q2_k)
+args=""
+
+if [ -z "$1" ]; then
+ echo "usage: $0 [qnt] [args]"
+ echo "default: $0 \"${qnt[@]}\" \"${args}\""
+ exit 1
+fi
+
+if [ ! -z "$2" ]; then
+ qnt=($2)
+fi
+
+if [ ! -z "$3" ]; then
+ args="$3"
+fi
+
+model="$1"
+out="../tmp/results-${model}"
+
+mkdir -p ${out}
+
+for q in ${qnt[@]}; do
+ time ./bin/quantize ../models/${model}/ggml-model-f16.gguf ../models/${model}/ggml-model-${q}.gguf ${q} 2>&1 ${args} | tee ${out}/qnt-${q}.txt
+done
diff --git a/scripts/run-all-perf.sh b/scripts/run-all-perf.sh
new file mode 100755
index 000000000..91a6d853f
--- /dev/null
+++ b/scripts/run-all-perf.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+qnt=(f16 q8_0 q6_k q5_k q5_1 q5_0 q4_k q4_1 q4_0 q3_k q2_k)
+args="-ngl 999 -n 64 -p 512"
+
+if [ -z "$1" ]; then
+ echo "usage: $0 [qnt] [args]"
+ echo "default: $0 \"${qnt[@]}\" \"${args}\""
+ exit 1
+fi
+
+if [ ! -z "$2" ]; then
+ qnt=($2)
+fi
+
+if [ ! -z "$3" ]; then
+ args="$3"
+fi
+
+model="$1"
+out="../tmp/results-${model}"
+
+mkdir -p ${out}
+
+mstr=""
+
+for q in ${qnt[@]}; do
+ mstr="${mstr} -m ../models/${model}/ggml-model-${q}.gguf"
+done
+
+./bin/llama-bench ${mstr} ${args} 2> /dev/null
diff --git a/scripts/run-all-ppl.sh b/scripts/run-all-ppl.sh
new file mode 100755
index 000000000..366d0866c
--- /dev/null
+++ b/scripts/run-all-ppl.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+qnt=(f16 q8_0 q6_k q5_k q5_1 q5_0 q4_k q4_1 q4_0 q3_k q2_k)
+args="-ngl 999 -t 8"
+
+if [ -z "$1" ]; then
+ echo "usage: $0 [qnt] [args]"
+ echo "default: $0 \"${qnt[@]}\" \"${args}\""
+ exit 1
+fi
+
+if [ ! -z "$2" ]; then
+ qnt=($2)
+fi
+
+if [ ! -z "$3" ]; then
+ args="$3"
+fi
+
+model="$1"
+out="../tmp/results-${model}"
+
+mkdir -p ${out}
+
+for q in ${qnt[@]}; do
+ time ./bin/perplexity -m ../models/${model}/ggml-model-${q}.gguf -f ./wiki.test.raw ${args} 2>&1 | tee ${out}/ppl-${q}.txt
+done
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 4ccefe932..2afaf86b1 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -28,7 +28,8 @@ llama_build_and_test_executable(test-sampling.cpp)
llama_build_executable(test-tokenizer-0.cpp)
llama_test_executable (test-tokenizer-0.llama test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
llama_build_executable(test-tokenizer-1.cpp)
-llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+# test-tokenizer-1 requires a BPE vocab. re-enable when we have one.
+#llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
#llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
llama_build_and_test_executable(test-grammar-parser.cpp)
llama_build_and_test_executable(test-llama-grammar.cpp)
diff --git a/tests/test-tokenizer-1.cpp b/tests/test-tokenizer-1.cpp
index 993d17f18..bd607d12b 100644
--- a/tests/test-tokenizer-1.cpp
+++ b/tests/test-tokenizer-1.cpp
@@ -67,11 +67,13 @@ int main(int argc, char **argv) {
}
}
+ GGML_ASSERT(llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_BPE);
+
const int n_vocab = llama_n_vocab(ctx);
for (int i = 0; i < n_vocab; ++i) {
- std::string forward = llama_token_to_str_bpe(ctx, i);
- std::vector<llama_token> tokens = llama_tokenize_bpe(ctx, forward, false);
+ std::string forward = llama_token_to_str(ctx, i);
+ std::vector<llama_token> tokens = llama_tokenize(ctx, forward, false);
if (tokens.size() == 1) {
if (i != tokens[0]) {
std::string backward = llama_token_to_str(ctx, tokens[0]);
@@ -79,16 +81,6 @@ int main(int argc, char **argv) {
__func__, i, llama_token_to_str(ctx, i).c_str(), tokens[0], backward.c_str());
return 2;
}
- } else {
- llama_token_type type = llama_token_get_type(ctx, i);
- if (type == LLAMA_TOKEN_TYPE_UNKNOWN || type == LLAMA_TOKEN_TYPE_CONTROL || type == LLAMA_TOKEN_TYPE_BYTE) {
- fprintf(stderr, "%s : info: token %d is string %s and bpe returns tokens %s\n",
- __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
- } else {
- fprintf(stderr, "%s : error: token %d is string %s but bpe returns tokens %s\n",
- __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
- return 2;
- }
}
}