From 34ab5268432fd287caa68d60bdd8aef411def3fa Mon Sep 17 00:00:00 2001 From: anzz1 Date: Sat, 25 Mar 2023 22:29:22 +0200 Subject: [PATCH 01/76] (Windows) Set console to UTF-8 on init (#420) Sets console codepage to 65001 (CP_UTF8) on start for both input and output, should fix problems with UTF-8 characters. --- examples/main/main.cpp | 55 ++++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 7bb2b6bc4..9af8a7405 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -23,6 +23,8 @@ extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle); extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode); extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode); +extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID); +extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID); #endif #define ANSI_COLOR_RED "\x1b[31m" @@ -44,17 +46,6 @@ enum console_state { static console_state con_st = CONSOLE_STATE_DEFAULT; static bool con_use_color = false; -void enable_console_colors() { -#if defined (_WIN32) - // Enable ANSI colors on Windows 10+ - unsigned long dwMode = 0; - void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11) - if (hConOut && hConOut != (void*)-1 && GetConsoleMode(hConOut, &dwMode) && !(dwMode & 0x4)) { - SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4) - } -#endif -} - void set_console_state(console_state new_st) { if (!con_use_color) return; // only emit color code if state changed @@ -90,6 +81,32 @@ void sigint_handler(int signo) { } #endif +#if defined (_WIN32) +void win32_console_init(void) { + unsigned long dwMode = 0; + void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11) + if (!hConOut || hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode)) { + hConOut = GetStdHandle((unsigned long)-12); // STD_ERROR_HANDLE (-12) + if (hConOut && (hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode))) { + hConOut = 0; + } + } + if (hConOut) { + // Enable ANSI colors on Windows 10+ + if (con_use_color && !(dwMode & 0x4)) { + SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4) + } + // Set console output codepage to UTF8 + SetConsoleOutputCP(65001); // CP_UTF8 + } + void* hConIn = GetStdHandle((unsigned long)-10); // STD_INPUT_HANDLE (-10) + if (hConIn && hConIn != (void*)-1 && GetConsoleMode(hConIn, &dwMode)) { + // Set console input codepage to UTF8 + SetConsoleCP(65001); // CP_UTF8 + } +} +#endif + int main(int argc, char ** argv) { gpt_params params; params.model = "models/llama-7B/ggml-model.bin"; @@ -98,6 +115,15 @@ int main(int argc, char ** argv) { return 1; } + + // save choice to use color for later + // (note for later: this is a slightly awkward choice) + con_use_color = params.use_color; + +#if defined (_WIN32) + win32_console_init(); +#endif + if (params.perplexity) { printf("\n************\n"); printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__); @@ -130,10 +156,6 @@ int main(int argc, char ** argv) { params.prompt = gpt_random_prompt(rng); } - // save choice to use color for later - // (note for later: this is a slightly awkward choice) - con_use_color = params.use_color; - // params.prompt = R"(// this function checks if 
the number n is prime //bool is_prime(int n) {)"; @@ -285,9 +307,6 @@ int main(int argc, char ** argv) { int n_consumed = 0; // the first thing we will do is to output the prompt, so set color accordingly - if (params.use_color) { - enable_console_colors(); - } set_console_state(CONSOLE_STATE_PROMPT); std::vector embd; From 2f7bf7dd7cd7299874d582f7f34834418abf4057 Mon Sep 17 00:00:00 2001 From: anzz1 Date: Sat, 25 Mar 2023 23:38:11 +0200 Subject: [PATCH 02/76] CMake / CI additions (#497) * CMake: Add AVX512 option * CI: Add AVX/AVX512 builds (Windows) (AVX512 tests can only be run when the worker happens to support it, building works anyway) * CMake: Fix sanitizer linkage ( merged #468 ) * CI: Add sanitizer builds (Ubuntu) * CI: Fix release tagging (change @zendesk/action-create-release to @anzz1/action-create-release until upstream PR Added commitish as input zendesk/action-create-release#32 is merged) --- .github/workflows/build.yml | 113 ++++++++++++++++++++++++++++++++---- CMakeLists.txt | 14 ++++- 2 files changed, 114 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e9826a735..126e1e66d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -64,6 +64,38 @@ jobs: cd build ctest --output-on-failure + ubuntu-latest-cmake-sanitizer: + runs-on: ubuntu-latest + + strategy: + matrix: + sanitizer: [ADDRESS, THREAD, UNDEFINED] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v1 + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential + + - name: Build + id: cmake_build + run: | + mkdir build + cd build + cmake .. -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON + cmake --build . --config Release + + - name: Test + id: cmake_test + run: | + cd build + ctest --output-on-failure + macOS-latest-make: runs-on: macos-latest @@ -112,6 +144,16 @@ jobs: windows-latest-cmake: runs-on: windows-latest + strategy: + matrix: + include: + - build: 'avx2' + defines: '' + - build: 'avx' + defines: '-DLLAMA_AVX2=OFF' + - build: 'avx512' + defines: '-DLLAMA_AVX512=ON' + steps: - name: Clone id: checkout @@ -122,11 +164,21 @@ jobs: run: | mkdir build cd build - cmake .. + cmake .. ${{ matrix.defines }} cmake --build . 
--config Release + - name: Check AVX512F support + id: check_avx512f + if: ${{ matrix.build == 'avx512' }} + continue-on-error: true + run: | + cd build + Set-Content -Path .\avx512f.exe -Value ([Convert]::FromBase64String('TVqQAAMAAAAEAAAA//8AALgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyAAAAA4fug4AtAnNIbgBTM0hVGhpcyBwcm9ncmFtIGNhbm5vdCBiZSBydW4gaW4gRE9TIG1vZGUuDQ0KJAAAAAAAAAClmfXY4fibi+H4m4vh+JuL4fiai+P4m4si98aL4vibi7Xbq4vg+JuLUmljaOH4m4sAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQRQAATAEBAGo6H2QAAAAAAAAAAOAADwELAQYAAAIAAAAAAAAAAAAADBAAAAAQAAAAIAAAAABAAAAQAAAAAgAABAAAAAAAAAAEAAAAAAAAAAAgAAAAAgAAAAAAAAMAAAAAABAAABAAAAAAEAAAEAAAAAAAABAAAAAAAAAAAAAAAFQQAAAoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC50ZXh0AAAAsgAAAAAQAAAAAgAAAAIAAAAAAAAAAAAAAAAAACAAAGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUEAAAiBAAAAAAAABVi+xRUVNTuAcAAAAPosHrEGaD4wGJXfxbg0X8MI1F+GoAUI1F/GoBUGr1/xUAEEAAUP8VBBBAAItF/FuDwND32BvAQMnDzMx8EAAAAAAAAAAAAACkEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlBAAAIgQAAAAAAAApANXcml0ZUZpbGUAuQFHZXRTdGRIYW5kbGUAAEtFUk5FTDMyLmRsbAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==')) -AsByteStream + .\avx512f.exe && echo " AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo " AVX512F: NO" + - name: Test id: cmake_test + if: ${{ matrix.build != 'avx512' || env.HAS_AVX512F == '1' }} # Test AVX-512 only when possible run: | cd build ctest -C Release --output-on-failure @@ -140,12 +192,39 @@ jobs: id: pack_artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | - 7z a llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-x64.zip .\build\bin\Release\* + 7z a llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-x64.zip .\build\bin\Release\* + + - name: Upload artifacts + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + uses: actions/upload-artifact@v3 + with: + path: | + llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-x64.zip + + release: + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + + runs-on: ubuntu-latest + + needs: + - ubuntu-latest-make + - ubuntu-latest-cmake + - macOS-latest-make + - macOS-latest-cmake + - windows-latest-cmake + + steps: + - name: Download artifacts + id: download-artifact + uses: actions/download-artifact@v3 + + - name: Get commit hash + id: commit + uses: pr-mpt/actions-commit-hash@v2 - name: Create release id: create_release - if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} - uses: zendesk/action-create-release@v1 + uses: anzz1/action-create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -153,15 +232,25 @@ jobs: - name: Upload release id: upload_release - if: ${{ ( github.event_name == 'push' && github.ref == 
'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + uses: actions/github-script@v3 with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: .\llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-x64.zip - asset_name: llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-x64.zip - asset_content_type: application/octet-stream + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + const path = require('path'); + const fs = require('fs'); + const release_id = '${{ steps.create_release.outputs.id }}'; + for (let file of await fs.readdirSync('./artifact')) { + if (path.extname(file) === '.zip') { + console.log('uploadReleaseAsset', file); + await github.repos.uploadReleaseAsset({ + owner: context.repo.owner, + repo: context.repo.repo, + release_id: release_id, + name: file, + data: await fs.readFileSync(`./artifact/${file}`) + }); + } + } # ubuntu-latest-gcc: # runs-on: ubuntu-latest diff --git a/CMakeLists.txt b/CMakeLists.txt index a1ff5a44e..27a222a16 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,6 +54,7 @@ option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" # instruction set specific option(LLAMA_AVX "llama: enable AVX" ON) option(LLAMA_AVX2 "llama: enable AVX2" ON) +option(LLAMA_AVX512 "llama: enable AVX512" OFF) option(LLAMA_FMA "llama: enable FMA" ON) # 3rd party libs @@ -75,14 +76,17 @@ find_package(Threads REQUIRED) if (NOT MSVC) if (LLAMA_SANITIZE_THREAD) add_compile_options(-fsanitize=thread) + link_libraries(-fsanitize=thread) endif() if (LLAMA_SANITIZE_ADDRESS) add_compile_options(-fsanitize=address -fno-omit-frame-pointer) + link_libraries(-fsanitize=address) endif() if (LLAMA_SANITIZE_UNDEFINED) add_compile_options(-fsanitize=undefined) + link_libraries(-fsanitize=undefined) endif() endif() @@ -185,7 +189,9 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$") message(STATUS "x86 detected") if (MSVC) - if (LLAMA_AVX2) + if (LLAMA_AVX512) + add_compile_options(/arch:AVX512) + elseif (LLAMA_AVX2) add_compile_options(/arch:AVX2) elseif (LLAMA_AVX) add_compile_options(/arch:AVX) @@ -201,6 +207,12 @@ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$") if (LLAMA_AVX2) add_compile_options(-mavx2) endif() + if (LLAMA_AVX512) + add_compile_options(-mavx512f) + # add_compile_options(-mavx512cd) + # add_compile_options(-mavx512dq) + # add_compile_options(-mavx512bw) + endif() endif() else() # TODO: support PowerPC From f732695cd57fb41e3a1be625cec4edf5be45b40a Mon Sep 17 00:00:00 2001 From: jp-x-g Date: Sat, 25 Mar 2023 14:53:55 -0700 Subject: [PATCH 03/76] Clarify console output in convert-pth-to-ggml.py (#512) "Processing part 1 of 3" instead of "Processing part 0" --- convert-pth-to-ggml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py index f0f6b0ec4..ccf2c57b1 100644 --- a/convert-pth-to-ggml.py +++ b/convert-pth-to-ggml.py @@ -161,7 +161,7 @@ def main(): for p in range(n_parts): - print(f"Processing part {p}\n") + print(f"Processing part {p+1} of {n_parts}\n") fname_model = f"{dir_model}/consolidated.0{p}.pth" fname_out = f"{dir_model}/ggml-model-{ftype_str[ftype]}.bin{'' if p == 0 else '.' 
+ str(p)}" From 19726169b379bebc96189673a19b89ab1d307659 Mon Sep 17 00:00:00 2001 From: anzz1 Date: Sun, 26 Mar 2023 00:13:28 +0200 Subject: [PATCH 04/76] CI: Run other sanitizer builds even if one fails (#511) applies only to sanitizer builds so they wont be cancelled --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 126e1e66d..2538d8566 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -67,6 +67,8 @@ jobs: ubuntu-latest-cmake-sanitizer: runs-on: ubuntu-latest + continue-on-error: true + strategy: matrix: sanitizer: [ADDRESS, THREAD, UNDEFINED] From 33e35b8fe8f09adcac0632e9cece62e1dd629f7d Mon Sep 17 00:00:00 2001 From: Harald Fernengel Date: Sun, 26 Mar 2023 07:25:46 +0200 Subject: [PATCH 05/76] Exit from interactive mode if input stream is bad (#491) Allow exiting the interactive prompt also with CTRL-D on Unix and CTRL-Z on Windows. --- examples/main/main.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 9af8a7405..e9478d541 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -450,7 +450,10 @@ int main(int argc, char ** argv) { std::string line; bool another_line = true; do { - std::getline(std::cin, line); + if (!std::getline(std::cin, line)) { + // input stream is bad or EOF received + return 0; + } if (line.empty() || line.back() != '\\') { another_line = false; } else { From 348d6926ee31d4476f9b90e1a627b0925a70f847 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 26 Mar 2023 10:20:49 +0300 Subject: [PATCH 06/76] Add logo to README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 9ba6241da..86dd9493b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # llama.cpp +![llama](https://user-images.githubusercontent.com/1991296/227761327-6d83e30e-2200-41a6-bfbb-f575231c54f4.png) + [![Actions Status](https://github.com/ggerganov/llama.cpp/workflows/CI/badge.svg)](https://github.com/ggerganov/llama.cpp/actions) [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) From 7a87d31f4f0c37bbb2ea695929fa4fe3ad579cda Mon Sep 17 00:00:00 2001 From: anzz1 Date: Sun, 26 Mar 2023 16:06:10 +0300 Subject: [PATCH 07/76] [main] fix infinite generation (-n == -1) (#523) --- examples/main/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/main/main.cpp b/examples/main/main.cpp index e9478d541..66b7c2d5d 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -493,7 +493,7 @@ int main(int argc, char ** argv) { } // In interactive mode, respect the maximum number of tokens and drop back to user input when reached. - if (params.interactive && n_remain <= 0) { + if (params.interactive && n_remain <= 0 && params.n_predict != -1) { n_remain = params.n_predict; is_interacting = true; } From b391579db92f095666be1d979899b54ae0981573 Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Sun, 26 Mar 2023 13:14:01 +0000 Subject: [PATCH 08/76] Update README and comments for standalone perplexity tool (#525) --- README.md | 6 +++--- examples/perplexity/perplexity.cpp | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 86dd9493b..5675a927b 100644 --- a/README.md +++ b/README.md @@ -248,7 +248,7 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach. 
### Perplexity (Measuring model quality) -You can pass `--perplexity` as a command line option to measure perplexity over the given prompt. For more background, +You can use the `perplexity` example to measure perplexity over the given prompt. For more background, see https://huggingface.co/docs/transformers/perplexity. However, in general, lower perplexity is better for LLMs. #### Latest measurements @@ -271,10 +271,10 @@ Perplexity - model options #### How to run 1. Download/extract: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research -2. Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw` +2. Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw` 3. Output: ``` -Calculating perplexity over 655 chunks +perplexity : calculating perplexity over 655 chunks 24.43 seconds per pass - ETA 4.45 hours [1]4.5970,[2]5.1807,[3]6.0382,... ``` diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index f617ba365..75d526d3d 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -19,7 +19,7 @@ std::vector softmax(const std::vector& logits) { void perplexity(llama_context * ctx, const gpt_params & params) { // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research - // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw` + // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw` // Output: `perplexity: 13.5106 [114/114]` auto tokens = ::llama_tokenize(ctx, params.prompt, true); From 8c2ec5e21d580c99e257c3cfddcf21fa53229aa4 Mon Sep 17 00:00:00 2001 From: Juan Calderon-Perez <835733+gaby@users.noreply.github.com> Date: Sun, 26 Mar 2023 10:48:42 -0400 Subject: [PATCH 09/76] Add support for linux/arm64 platform during Docker Builds (#514) * Add support for linux/arm64 platform * Add platform to versioned builds --- .github/workflows/docker.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d1a43caa6..f70821de2 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -49,6 +49,7 @@ jobs: with: context: . push: true + platforms: linux/amd64,linux/arm64 tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}" file: ${{ matrix.config.dockerfile }} @@ -57,5 +58,6 @@ jobs: with: context: . 
push: ${{ github.event_name == 'push' }} + platforms: linux/amd64,linux/arm64 tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}" file: ${{ matrix.config.dockerfile }} \ No newline at end of file From 939ad2d3a56815f480b6fd5ea432a7ee576a7e6b Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Sun, 26 Mar 2023 15:34:02 +0000 Subject: [PATCH 10/76] Fix undefined variables in debug build, remove unused variables (#531) --- ggml.c | 47 +++++++++++++++++++---------------------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/ggml.c b/ggml.c index c9a4e8675..ec00e6317 100644 --- a/ggml.c +++ b/ggml.c @@ -1698,8 +1698,6 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void // Horizontal sum of all lanes of the accumulator sumf = _mm512_reduce_add_ps( acc0 ) + _mm512_reduce_add_ps( acc1 ); #elif defined(__AVX2__) - const size_t countBlocks = nb; - // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); @@ -5806,23 +5804,28 @@ static void ggml_compute_forward_mul_mat_f32( const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; +#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) const int ne10 = src1->ne[0]; +#endif const int ne11 = src1->ne[1]; - //const int ne12 = src1->ne[2]; - //const int ne13 = src1->ne[3]; +#ifndef NDEBUG + const int ne12 = src1->ne[2]; + const int ne13 = src1->ne[3]; - //const int ne0 = dst->ne[0]; - //const int ne1 = dst->ne[1]; - //const int ne2 = dst->ne[2]; - //const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; + const int ne0 = dst->ne[0]; + const int ne1 = dst->ne[1]; + const int ne2 = dst->ne[2]; + const int ne3 = dst->ne[3]; - //const int nb00 = src0->nb[0]; + const int nb00 = src0->nb[0]; +#endif const int nb01 = src0->nb[1]; const int nb02 = src0->nb[2]; const int nb03 = src0->nb[3]; +#ifndef NDEBUG const int nb10 = src1->nb[0]; +#endif const int nb11 = src1->nb[1]; const int nb12 = src1->nb[2]; const int nb13 = src1->nb[3]; @@ -5840,8 +5843,9 @@ static void ggml_compute_forward_mul_mat_f32( assert(ne2 == ne12); assert(ne3 == ne13); - // TODO: we don't support permuted src0 + // we don't support permuted src0 or src1 assert(nb00 == sizeof(float)); + assert(nb10 == sizeof(float)); // dst cannot be transposed or permuted assert(nb0 == sizeof(float)); @@ -5859,8 +5863,6 @@ static void ggml_compute_forward_mul_mat_f32( #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - GGML_ASSERT(nb10 == sizeof(float)); - if (params->ith != 0) { return; } @@ -5903,9 +5905,6 @@ static void ggml_compute_forward_mul_mat_f32( return; } - // TODO: do not support transposed src1 - assert(nb10 == sizeof(float)); - // parallelize by src0 rows using ggml_vec_dot_f32 // total rows in src0 @@ -6169,7 +6168,6 @@ static void ggml_compute_forward_mul_mat_q4_0_f32( const int ne1 = dst->ne[1]; const int ne2 = dst->ne[2]; const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -6194,8 +6192,9 @@ static void ggml_compute_forward_mul_mat_q4_0_f32( GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); - // TODO: we don't support permuted src0 + // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_0]); + GGML_ASSERT(nb10 == sizeof(float)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); @@ -6213,8 +6212,6 @@ static void ggml_compute_forward_mul_mat_q4_0_f32( #if defined(GGML_USE_ACCELERATE) || 
defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - GGML_ASSERT(nb10 == sizeof(float)); - if (params->ith != 0) { return; } @@ -6278,8 +6275,6 @@ static void ggml_compute_forward_mul_mat_q4_0_f32( return; } - // TODO: do not support transposed src1 - // parallelize by src0 rows using ggml_vec_dot_q4_0 // total rows in src0 @@ -6354,7 +6349,6 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( const int ne1 = dst->ne[1]; const int ne2 = dst->ne[2]; const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -6379,8 +6373,9 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); - // TODO: we don't support permuted src0 + // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_1]); + GGML_ASSERT(nb10 == sizeof(float)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); @@ -6398,8 +6393,6 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - GGML_ASSERT(nb10 == sizeof(float)); - if (params->ith != 0) { return; } @@ -6466,8 +6459,6 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( return; } - // TODO: do not support transposed src1 - // parallelize by src0 rows using ggml_vec_dot_q4_1 // total rows in src0 From 34c1072e497eb92d81ee7c0e12aa6741496a41c6 Mon Sep 17 00:00:00 2001 From: Erik Scholz Date: Sun, 26 Mar 2023 17:48:40 +0200 Subject: [PATCH 11/76] ci: add debug build to sanitizer build matrix (#527) --- .github/workflows/build.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2538d8566..26b451943 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -72,6 +72,8 @@ jobs: strategy: matrix: sanitizer: [ADDRESS, THREAD, UNDEFINED] + build_type: [Debug, Release] + accelerate: [ON, OFF] steps: - name: Clone @@ -89,8 +91,8 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON - cmake --build . --config Release + cmake .. -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DLLAMA_ACCELERATE=${{ matrix.accelerate }} + cmake --build . 
--config ${{ matrix.build_type }} - name: Test id: cmake_test From 7e5395575a3360598f2565c73c8a2ec0c0abbdb8 Mon Sep 17 00:00:00 2001 From: Marco Matthies <71844+marcom@users.noreply.github.com> Date: Mon, 27 Mar 2023 06:55:26 +0200 Subject: [PATCH 12/76] Fix missing ggml link in cmake for examples/* on w64-mingw32 (#542) --- examples/embedding/CMakeLists.txt | 2 +- examples/main/CMakeLists.txt | 2 +- examples/perplexity/CMakeLists.txt | 2 +- examples/quantize/CMakeLists.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/embedding/CMakeLists.txt b/examples/embedding/CMakeLists.txt index 88c425d4a..def5b831b 100644 --- a/examples/embedding/CMakeLists.txt +++ b/examples/embedding/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET embedding) add_executable(${TARGET} embedding.cpp) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/main/CMakeLists.txt b/examples/main/CMakeLists.txt index b2dcc2910..aa1f79406 100644 --- a/examples/main/CMakeLists.txt +++ b/examples/main/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET main) add_executable(${TARGET} main.cpp) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/perplexity/CMakeLists.txt b/examples/perplexity/CMakeLists.txt index 5836df8b2..9bd8e376f 100644 --- a/examples/perplexity/CMakeLists.txt +++ b/examples/perplexity/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET perplexity) add_executable(${TARGET} perplexity.cpp) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/quantize/CMakeLists.txt b/examples/quantize/CMakeLists.txt index fb27d4517..17a995bbd 100644 --- a/examples/quantize/CMakeLists.txt +++ b/examples/quantize/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET quantize) add_executable(${TARGET} quantize.cpp) -target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE llama ggml ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) From 4b8efff0e3945090379aa2f897ff125c8f9cdbae Mon Sep 17 00:00:00 2001 From: RJ Adriaansen Date: Tue, 28 Mar 2023 08:11:09 +0200 Subject: [PATCH 13/76] Add embedding example to Makefile (#540) --- Makefile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 98a2d85f3..973b2951b 100644 --- a/Makefile +++ b/Makefile @@ -212,7 +212,7 @@ $(info I CC: $(CCV)) $(info I CXX: $(CXXV)) $(info ) -default: main quantize perplexity +default: main quantize perplexity embedding # # Build library @@ -228,7 +228,7 @@ common.o: examples/common.cpp examples/common.h $(CXX) $(CXXFLAGS) -c examples/common.cpp -o common.o clean: - rm -vf *.o main quantize perplexity + rm -vf *.o main quantize perplexity embedding main: examples/main/main.cpp ggml.o llama.o common.o $(CXX) $(CXXFLAGS) examples/main/main.cpp ggml.o llama.o common.o -o main $(LDFLAGS) @@ -242,6 +242,9 @@ quantize: examples/quantize/quantize.cpp ggml.o llama.o perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o common.o $(CXX) $(CXXFLAGS) 
examples/perplexity/perplexity.cpp ggml.o llama.o common.o -o perplexity $(LDFLAGS) +embedding: examples/embedding/embedding.cpp ggml.o llama.o common.o + $(CXX) $(CXXFLAGS) examples/embedding/embedding.cpp ggml.o llama.o common.o -o embedding $(LDFLAGS) + # # Tests # From 7b8dbcb78b2f65c4676e41da215800d65846edd0 Mon Sep 17 00:00:00 2001 From: anzz1 Date: Tue, 28 Mar 2023 17:09:55 +0300 Subject: [PATCH 14/76] main.cpp fixes, refactoring (#571) - main: entering empty line passes back control without new input in interactive/instruct modes - instruct mode: keep prompt fix - instruct mode: duplicate instruct prompt fix - refactor: move common console code from main->common --- examples/common.cpp | 67 +++++++++++++++-- examples/common.h | 30 ++++++++ examples/main/main.cpp | 166 ++++++++++++++--------------------------- 3 files changed, 144 insertions(+), 119 deletions(-) diff --git a/examples/common.cpp b/examples/common.cpp index 2ab000f4f..880ebe9a2 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -9,11 +9,20 @@ #include #include - #if defined(_MSC_VER) || defined(__MINGW32__) - #include // using malloc.h with MSC/MINGW - #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) - #include - #endif +#if defined(_MSC_VER) || defined(__MINGW32__) +#include // using malloc.h with MSC/MINGW +#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) +#include +#endif + +#if defined (_WIN32) +#pragma comment(lib,"kernel32.lib") +extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle); +extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode); +extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode); +extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID); +extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID); +#endif bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { // determine sensible default number of threads. 
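// --- a minimal usage sketch of the console helpers this patch moves into
// examples/common (console_state, set_console_color, win32_console_init and
// the CONSOLE_COLOR_* values declared in examples/common.h below); the
// console_color_demo() wrapper and its name are illustrative assumptions,
// not part of the patch ---
#include <cstdio>
#include "common.h"

static void console_color_demo(bool use_color) {
    console_state con_st;
    con_st.use_color = use_color;          // colors are emitted only when requested
#if defined (_WIN32)
    win32_console_init(use_color);         // enable ANSI escapes + UTF-8 codepages
#endif
    set_console_color(con_st, CONSOLE_COLOR_PROMPT);      // yellow prompt text
    printf("prompt text\n");
    set_console_color(con_st, CONSOLE_COLOR_USER_INPUT);  // bold green user input
    printf("user input\n");
    set_console_color(con_st, CONSOLE_COLOR_DEFAULT);     // restore default color
}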
@@ -204,7 +213,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stderr, " --in-prefix STRING string to prefix user inputs with (default: empty)\n"); fprintf(stderr, " -f FNAME, --file FNAME\n"); fprintf(stderr, " prompt file to start generation.\n"); - fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d, -1 - infinity)\n", params.n_predict); + fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict); fprintf(stderr, " --top_k N top-k sampling (default: %d)\n", params.top_k); fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", params.top_p); fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n); @@ -216,7 +225,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stderr, " --n_parts N number of model parts (default: -1 = determine from dimensions)\n"); fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch); fprintf(stderr, " --perplexity compute perplexity over the prompt\n"); - fprintf(stderr, " --keep number of tokens to keep from the initial prompt\n"); + fprintf(stderr, " --keep number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep); if (ggml_mlock_supported()) { fprintf(stderr, " --mlock force system to keep model in RAM rather than swapping or compressing\n"); } @@ -256,3 +265,47 @@ std::vector llama_tokenize(struct llama_context * ctx, const std::s return res; } + +/* Keep track of current color of output, and emit ANSI code if it changes. */ +void set_console_color(console_state & con_st, console_color_t color) { + if (con_st.use_color && con_st.color != color) { + switch(color) { + case CONSOLE_COLOR_DEFAULT: + printf(ANSI_COLOR_RESET); + break; + case CONSOLE_COLOR_PROMPT: + printf(ANSI_COLOR_YELLOW); + break; + case CONSOLE_COLOR_USER_INPUT: + printf(ANSI_BOLD ANSI_COLOR_GREEN); + break; + } + con_st.color = color; + } +} + +#if defined (_WIN32) +void win32_console_init(bool enable_color) { + unsigned long dwMode = 0; + void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11) + if (!hConOut || hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode)) { + hConOut = GetStdHandle((unsigned long)-12); // STD_ERROR_HANDLE (-12) + if (hConOut && (hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode))) { + hConOut = 0; + } + } + if (hConOut) { + // Enable ANSI colors on Windows 10+ + if (enable_color && !(dwMode & 0x4)) { + SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4) + } + // Set console output codepage to UTF8 + SetConsoleOutputCP(65001); // CP_UTF8 + } + void* hConIn = GetStdHandle((unsigned long)-10); // STD_INPUT_HANDLE (-10) + if (hConIn && hConIn != (void*)-1 && GetConsoleMode(hConIn, &dwMode)) { + // Set console input codepage to UTF8 + SetConsoleCP(65001); // CP_UTF8 + } +} +#endif diff --git a/examples/common.h b/examples/common.h index 8caefd859..1505aa927 100644 --- a/examples/common.h +++ b/examples/common.h @@ -63,3 +63,33 @@ std::string gpt_random_prompt(std::mt19937 & rng); // std::vector llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos); + +// +// Console utils +// + +#define ANSI_COLOR_RED "\x1b[31m" +#define ANSI_COLOR_GREEN "\x1b[32m" +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_BLUE "\x1b[34m" +#define ANSI_COLOR_MAGENTA "\x1b[35m" 
+#define ANSI_COLOR_CYAN "\x1b[36m" +#define ANSI_COLOR_RESET "\x1b[0m" +#define ANSI_BOLD "\x1b[1m" + +enum console_color_t { + CONSOLE_COLOR_DEFAULT=0, + CONSOLE_COLOR_PROMPT, + CONSOLE_COLOR_USER_INPUT +}; + +struct console_state { + bool use_color = false; + console_color_t color = CONSOLE_COLOR_DEFAULT; +}; + +void set_console_color(console_state & con_st, console_color_t color); + +#if defined (_WIN32) +void win32_console_init(bool enable_color); +#endif diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 66b7c2d5d..d5ab2cf75 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -18,58 +18,13 @@ #include #endif -#if defined (_WIN32) -#pragma comment(lib,"kernel32.lib") -extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle); -extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode); -extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode); -extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID); -extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID); -#endif - -#define ANSI_COLOR_RED "\x1b[31m" -#define ANSI_COLOR_GREEN "\x1b[32m" -#define ANSI_COLOR_YELLOW "\x1b[33m" -#define ANSI_COLOR_BLUE "\x1b[34m" -#define ANSI_COLOR_MAGENTA "\x1b[35m" -#define ANSI_COLOR_CYAN "\x1b[36m" -#define ANSI_COLOR_RESET "\x1b[0m" -#define ANSI_BOLD "\x1b[1m" - -/* Keep track of current color of output, and emit ANSI code if it changes. */ -enum console_state { - CONSOLE_STATE_DEFAULT=0, - CONSOLE_STATE_PROMPT, - CONSOLE_STATE_USER_INPUT -}; - -static console_state con_st = CONSOLE_STATE_DEFAULT; -static bool con_use_color = false; - -void set_console_state(console_state new_st) { - if (!con_use_color) return; - // only emit color code if state changed - if (new_st != con_st) { - con_st = new_st; - switch(con_st) { - case CONSOLE_STATE_DEFAULT: - printf(ANSI_COLOR_RESET); - return; - case CONSOLE_STATE_PROMPT: - printf(ANSI_COLOR_YELLOW); - return; - case CONSOLE_STATE_USER_INPUT: - printf(ANSI_BOLD ANSI_COLOR_GREEN); - return; - } - } -} +static console_state con_st; static bool is_interacting = false; #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) void sigint_handler(int signo) { - set_console_state(CONSOLE_STATE_DEFAULT); + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); printf("\n"); // this also force flush stdout. 
if (signo == SIGINT) { if (!is_interacting) { @@ -81,32 +36,6 @@ void sigint_handler(int signo) { } #endif -#if defined (_WIN32) -void win32_console_init(void) { - unsigned long dwMode = 0; - void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11) - if (!hConOut || hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode)) { - hConOut = GetStdHandle((unsigned long)-12); // STD_ERROR_HANDLE (-12) - if (hConOut && (hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode))) { - hConOut = 0; - } - } - if (hConOut) { - // Enable ANSI colors on Windows 10+ - if (con_use_color && !(dwMode & 0x4)) { - SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4) - } - // Set console output codepage to UTF8 - SetConsoleOutputCP(65001); // CP_UTF8 - } - void* hConIn = GetStdHandle((unsigned long)-10); // STD_INPUT_HANDLE (-10) - if (hConIn && hConIn != (void*)-1 && GetConsoleMode(hConIn, &dwMode)) { - // Set console input codepage to UTF8 - SetConsoleCP(65001); // CP_UTF8 - } -} -#endif - int main(int argc, char ** argv) { gpt_params params; params.model = "models/llama-7B/ggml-model.bin"; @@ -115,13 +44,12 @@ int main(int argc, char ** argv) { return 1; } - // save choice to use color for later // (note for later: this is a slightly awkward choice) - con_use_color = params.use_color; + con_st.use_color = params.use_color; #if defined (_WIN32) - win32_console_init(); + win32_console_init(params.use_color); #endif if (params.perplexity) { @@ -218,7 +146,10 @@ int main(int argc, char ** argv) { return 1; } - params.n_keep = std::min(params.n_keep, (int) embd_inp.size()); + // number of tokens to keep when resetting context + if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size() || params.instruct) { + params.n_keep = (int)embd_inp.size(); + } // prefix & suffix for instruct mode const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true); @@ -226,16 +157,12 @@ int main(int argc, char ** argv) { // in instruct mode, we inject a prefix and a suffix to each input by the user if (params.instruct) { - params.interactive = true; + params.interactive_start = true; params.antiprompt.push_back("### Instruction:\n\n"); } - // enable interactive mode if reverse prompt is specified - if (params.antiprompt.size() != 0) { - params.interactive = true; - } - - if (params.interactive_start) { + // enable interactive mode if reverse prompt or interactive start is specified + if (params.antiprompt.size() != 0 || params.interactive_start) { params.interactive = true; } @@ -297,17 +224,18 @@ int main(int argc, char ** argv) { #endif " - Press Return to return control to LLaMa.\n" " - If you want to submit another line, end your input in '\\'.\n\n"); - is_interacting = params.interactive_start || params.instruct; + is_interacting = params.interactive_start; } - bool input_noecho = false; + bool is_antiprompt = false; + bool input_noecho = false; int n_past = 0; int n_remain = params.n_predict; int n_consumed = 0; // the first thing we will do is to output the prompt, so set color accordingly - set_console_state(CONSOLE_STATE_PROMPT); + set_console_color(con_st, CONSOLE_COLOR_PROMPT); std::vector embd; @@ -408,36 +336,38 @@ int main(int argc, char ** argv) { } // reset color to default if we there is no pending user input if (!input_noecho && (int)embd_inp.size() == n_consumed) { - set_console_state(CONSOLE_STATE_DEFAULT); + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); } // in interactive mode, and not currently processing queued inputs; // check if we 
should prompt the user for more if (params.interactive && (int) embd_inp.size() <= n_consumed) { - // check for reverse prompt - std::string last_output; - for (auto id : last_n_tokens) { - last_output += llama_token_to_str(ctx, id); - } - // Check if each of the reverse prompts appears at the end of the output. - for (std::string & antiprompt : params.antiprompt) { - if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) { - is_interacting = true; - set_console_state(CONSOLE_STATE_USER_INPUT); - fflush(stdout); - break; + // check for reverse prompt + if (params.antiprompt.size()) { + std::string last_output; + for (auto id : last_n_tokens) { + last_output += llama_token_to_str(ctx, id); + } + + is_antiprompt = false; + // Check if each of the reverse prompts appears at the end of the output. + for (std::string & antiprompt : params.antiprompt) { + if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) { + is_interacting = true; + is_antiprompt = true; + set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); + fflush(stdout); + break; + } } } if (n_past > 0 && is_interacting) { // potentially set color to indicate we are taking user input - set_console_state(CONSOLE_STATE_USER_INPUT); + set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); if (params.instruct) { - n_consumed = embd_inp.size(); - embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end()); - printf("\n> "); } @@ -463,17 +393,29 @@ int main(int argc, char ** argv) { } while (another_line); // done taking input, reset color - set_console_state(CONSOLE_STATE_DEFAULT); + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); - auto line_inp = ::llama_tokenize(ctx, buffer, false); - embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end()); + // Add tokens to embd only if the input buffer is non-empty + // Entering a empty line lets the user pass control back + if (buffer.length() > 1) { - if (params.instruct) { - embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end()); + // instruct mode: insert instruction prefix + if (params.instruct && !is_antiprompt) { + n_consumed = embd_inp.size(); + embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end()); + } + + auto line_inp = ::llama_tokenize(ctx, buffer, false); + embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end()); + + // instruct mode: insert response suffix + if (params.instruct) { + embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end()); + } + + n_remain -= line_inp.size(); } - n_remain -= line_inp.size(); - input_noecho = true; // do not echo this again } @@ -506,7 +448,7 @@ int main(int argc, char ** argv) { llama_print_timings(ctx); llama_free(ctx); - set_console_state(CONSOLE_STATE_DEFAULT); + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); return 0; } From a6bdc47cba23713a22ade47dd65b6afeb8009ff4 Mon Sep 17 00:00:00 2001 From: slaren <2141330+slaren@users.noreply.github.com> Date: Tue, 28 Mar 2023 16:26:55 +0200 Subject: [PATCH 15/76] Fix usage of F16C intrinsics in AVX code (#563) * Fix usage of F16C intrinsics in AVX code when F16C is not defined --- ggml.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/ggml.c b/ggml.c index ec00e6317..ba89b5d84 100644 --- a/ggml.c +++ b/ggml.c @@ -1122,13 +1122,36 @@ void dequantize_row_q4_1(const void * restrict x, float * restrict y, int k) { #define GGML_F16_EPR 8 // F16 arithmetic is not supported by 
AVX, so we use F32 instead -// we take advantage of the _mm256_cvt intrinsics to convert F16 <-> F32 #define GGML_F32Cx8 __m256 #define GGML_F32Cx8_ZERO _mm256_setzero_ps() #define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) + +#if defined(__F16C__) +// the _mm256_cvt intrinsics require F16C #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) +#else +static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { + float tmp[8]; + + for (int i = 0; i < 8; i++) + tmp[i] = GGML_FP16_TO_FP32(x[i]); + + return _mm256_loadu_ps(tmp); +} +static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { + float arr[8]; + + _mm256_storeu_ps(arr, y); + + for (int i = 0; i < 8; i++) + x[i] = GGML_FP16_TO_FP32(arr[i]); +} +#define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) +#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) +#endif + #define GGML_F32Cx8_FMA GGML_F32x8_FMA #define GGML_F32Cx8_ADD _mm256_add_ps #define GGML_F32Cx8_MUL _mm256_mul_ps From 28ba975aea1dcae2f31770516f5d542ff177771e Mon Sep 17 00:00:00 2001 From: dotpy314 <33351922+dotpy314@users.noreply.github.com> Date: Tue, 28 Mar 2023 23:06:28 +0800 Subject: [PATCH 16/76] Check the existence of f16_model_path_base in quantize.py (#574) Co-authored-by: Jincheng Miao --- quantize.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/quantize.py b/quantize.py index 16b5963d3..641df8dda 100644 --- a/quantize.py +++ b/quantize.py @@ -74,6 +74,10 @@ def main(): args.models_path, model, "ggml-model-f16.bin" ) + if not os.path.isfile(f16_model_path_base): + print(f'The file %s was not found' % f16_model_path_base) + sys.exit(1) + f16_model_parts_paths = map( lambda filename: os.path.join(f16_model_path_base, filename), glob.glob(f"{f16_model_path_base}*") From e0670260fb50a882b37074112b1881fb0820cf77 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 28 Mar 2023 18:34:35 +0300 Subject: [PATCH 17/76] gitignore : add "embedding" --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ce01fd541..053311fee 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ models/* /quantize /result /perplexity +/embedding arm_neon.h compile_commands.json From c1f885067c61191a07a1aedf684168dda62f3f71 Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Tue, 28 Mar 2023 15:56:03 +0000 Subject: [PATCH 18/76] ggml : introduce structs for the q4 data blocks (#356) * Introduce structs for the q4 data blocks * ggml : rename quant struct variables + fix ARM_NEON --------- Co-authored-by: Georgi Gerganov --- examples/quantize/quantize.cpp | 4 +- ggml.c | 359 +++++++++++++-------------------- ggml.h | 4 +- llama.cpp | 11 +- llama.h | 3 +- tests/test-quantize.c | 4 +- 6 files changed, 150 insertions(+), 235 deletions(-) diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index f0230f5dc..3888ff587 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -4,8 +4,6 @@ #include #include -const int QK = 32; - // usage: // ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type // @@ -39,7 +37,7 @@ int main(int argc, char ** argv) { { const int64_t t_start_us = ggml_time_us(); - if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype, QK)) { + if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype)) { fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); return 1; } diff --git 
a/ggml.c b/ggml.c index ba89b5d84..bf8ec8ab2 100644 --- a/ggml.c +++ b/ggml.c @@ -448,17 +448,27 @@ static inline __m128i packNibbles( __m256i bytes ) // method 5 // blocks of QK elements // represented with a single float (delta) and QK/2 8-bit ints (i.e QK 4-bit signed integer factors) +typedef struct { + float d; // delta + uint8_t qs[QK / 2]; // nibbles / quants +} block_q4_0; +static_assert(sizeof(block_q4_0) == sizeof(float) + QK / 2, "wrong q4_0 block size/padding"); + +// method 4 +// blocks of QK elements +// represented with 2 floats (delta + min) and QK/2 8-bit ints (i.e QK 4-bit unsigned integer factors) +typedef struct { + float d; + float m; + uint8_t qs[QK / 2]; // nibbles / quants +} block_q4_1; +static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK / 2, "wrong q4_1 block size/padding"); // reference implementation for deterministic creation of model files -static void quantize_row_q4_0_reference(const float * restrict x, void * restrict y, int k) { +static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) { assert(k % QK == 0); const int nb = k / QK; - const size_t bs = sizeof(float) + QK/2; - - uint8_t * restrict pd = ((uint8_t *)y + 0*bs); - uint8_t * restrict pb = ((uint8_t *)y + 0*bs + sizeof(float)); - uint8_t pp[QK/2]; for (int i = 0; i < nb; i++) { @@ -472,8 +482,7 @@ static void quantize_row_q4_0_reference(const float * restrict x, void * restric const float d = amax / ((1 << 3) - 1); const float id = d ? 1.0f/d : 0.0f; - *(float *)pd = d; - pd += bs; + y[i].d = d; for (int l = 0; l < QK; l += 2) { const float v0 = x[i*QK + l + 0]*id; @@ -488,23 +497,15 @@ static void quantize_row_q4_0_reference(const float * restrict x, void * restric pp[l/2] = vi0 | (vi1 << 4); } - memcpy(pb, pp, sizeof(pp)); - pb += bs; + memcpy(y[i].qs, pp, sizeof(pp)); } } -void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { +static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int k) { assert(k % QK == 0); - -#if defined(__ARM_NEON) || defined(__AVX2__) || defined(__wasm_simd128__) || defined(__POWER9_VECTOR__) const int nb = k / QK; - const size_t bs = sizeof(float) + QK/2; - uint8_t * restrict pd = ((uint8_t *)y + 0*bs); - uint8_t * restrict pb = ((uint8_t *)y + 0*bs + sizeof(float)); - - uint8_t pp[QK/2]; -#endif + block_q4_0 * restrict y = vy; #if defined(__POWER9_VECTOR__) const vector float v85 = vec_splats(8.5f); @@ -532,10 +533,10 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { const float d = amax / ((1 << 3) - 1); const float id = d ? 1.0/d : 0.0; - *(float *)pd = d; - pd += bs; + y[i].d = d; const vector float vid = vec_splats(id); + uint8_t * restrict pb = y[i].qs; for (int l = 0; l < 8; l++) { const vector float vf = vec_madd(srcv[l], vid, v85); const vector signed int vi = vec_signed(vf); @@ -543,11 +544,9 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { pb[2*l + 0] = vec_extract(vi, 0) | (vec_extract(vi, 1) << 4); pb[2*l + 1] = vec_extract(vi, 2) | (vec_extract(vi, 3) << 4); } - - //memcpy(pb, pp, sizeof(pp)); - pb += bs; } #elif __ARM_NEON + uint8_t pp[QK/2]; for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max @@ -569,8 +568,7 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { const float d = amax / ((1 << 3) - 1); const float id = d ? 
1.0/d : 0.0; - *(float *)pd = d; - pd += bs; + y[i].d = d; for (int l = 0; l < 8; l++) { const float32x4_t v = vmulq_n_f32(srcv[l], id); @@ -581,8 +579,7 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { pp[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); } - memcpy(pb, pp, sizeof(pp)); - pb += bs; + memcpy(y[i].qs, pp, sizeof(pp)); } #elif defined(__AVX2__) for (int i = 0; i < nb; i++) { @@ -607,8 +604,7 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { // Quantize these floats const float d = maxScalar / 7.0f; - *(float *)pd = d; - pd += bs; + y[i].d = d; const float id = ( maxScalar != 0.0f ) ? 7.0f / maxScalar : 0.0f; const __m256 mul = _mm256_set1_ps( id ); @@ -648,10 +644,10 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { // Compress the vector into 4 bit/value, and store __m128i res = packNibbles( i0 ); - _mm_storeu_si128( ( __m128i* )pb, res ); - pb += bs; + _mm_storeu_si128( ( __m128i* )y[i].qs, res ); } #elif defined(__wasm_simd128__) + uint8_t pp[QK/2]; for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max @@ -673,8 +669,7 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { const float d = amax / ((1 << 3) - 1); const float id = d ? 1.0/d : 0.0; - *(float *)pd = d; - pd += bs; + y[i].d = d; for (int l = 0; l < 8; l++) { const v128_t v = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id)); @@ -685,8 +680,7 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { pp[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4); } - memcpy(pb, pp, sizeof(pp)); - pb += bs; + memcpy(y[i].qs, pp, sizeof(pp)); } #else // scalar @@ -694,18 +688,11 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { #endif } -// method 4 -// blocks of QK elements -// represented with 2 floats (min + delta) and QK/2 8-bit ints (i.e QK 4-bit unsigned integer factors) -void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) { +static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) { assert(k % QK == 0); - const int nb = k / QK; - const size_t bs = 2*sizeof(float) + QK/2; - uint8_t * restrict pd = ((uint8_t *)y + 0*bs); - uint8_t * restrict pm = ((uint8_t *)y + 0*bs + sizeof(float)); - uint8_t * restrict pb = ((uint8_t *)y + 0*bs + 2*sizeof(float)); + block_q4_1 * restrict y = vy; uint8_t pp[QK/2]; @@ -722,10 +709,8 @@ void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) { const float d = (max - min) / ((1 << 4) - 1); const float id = d ? 
1.0f/d : 0.0f; - *(float *)pm = min; - *(float *)pd = d; - pm += bs; - pd += bs; + y[i].d = d; + y[i].m = min; for (int l = 0; l < QK; l += 2) { const float v0 = (x[i*QK + l + 0] - min)*id; @@ -740,27 +725,22 @@ void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) { pp[l/2] = vi0 | (vi1 << 4); } - memcpy(pb, pp, sizeof(pp)); - pb += bs; + memcpy(y[i].qs, pp, sizeof(pp)); } } -// TODO: vectorize -void dequantize_row_q4_0(const void * restrict x, float * restrict y, int k) { +static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) { assert(k % QK == 0); - const int nb = k / QK; - const size_t bs = sizeof(float) + QK/2; - const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs); - const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + sizeof(float)); + const block_q4_0 * restrict x = vx; #if defined(__AVX2__) for (int i = 0; i < nb; i++) { // scale factor - const __m256 d_v = _mm256_broadcast_ss((const float *) (pd + i*bs)); + const __m256 d_v = _mm256_broadcast_ss(&x[i].d); - const uint8_t * restrict pp = pb + i*bs; + const uint8_t * restrict pp = x[i].qs; for (int l = 0; l < QK; l += 32) { // Load 32x4-bit integers into 32x8-bit integers @@ -790,17 +770,15 @@ void dequantize_row_q4_0(const void * restrict x, float * restrict y, int k) { } #elif defined(__ARM_NEON) for (int i = 0; i < nb; i++) { - const float d = *(const float *) (pd + i*bs); + const float32x4_t vd = vdupq_n_f32(x[i].d); - const uint8_t * restrict pp = pb + i*bs; - - const float32x4_t vd = vdupq_n_f32(d); + const uint8_t * restrict pp = x[i].qs; for (int l = 0; l < QK; l += 16) { // Load 16x4-bit integers into 8x8-bit integers const uint8x8_t v8 = vld1_u8(pp + l/2); - // Expand 4-bit nibbles to 8-bit bytes + // Expand 4-bit qs to 8-bit bytes const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f)); const uint8x8_t v1 = vshr_n_u8(v8, 4); @@ -844,9 +822,9 @@ void dequantize_row_q4_0(const void * restrict x, float * restrict y, int k) { #else // scalar for (int i = 0; i < nb; i++) { - const float d = *(const float *) (pd + i*bs); + const float d = x[i].d; - const uint8_t * restrict pp = pb + i*bs; + const uint8_t * restrict pp = x[i].qs; for (int l = 0; l < QK; l += 2) { const uint8_t vi = pp[l/2]; @@ -869,22 +847,18 @@ void dequantize_row_q4_0(const void * restrict x, float * restrict y, int k) { #endif } -void dequantize_row_q4_1(const void * restrict x, float * restrict y, int k) { +static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, int k) { assert(k % QK == 0); - const int nb = k / QK; - const size_t bs = 2*sizeof(float) + QK/2; - const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs); - const uint8_t * restrict pm = ((const uint8_t *)x + 0*bs + sizeof(float)); - const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + 2*sizeof(float)); + const block_q4_1 * restrict x = vx; #if defined(__AVX2__) for (int i = 0; i < nb; i++) { - const __m256 d_v = _mm256_broadcast_ss((const float *) (pd + i*bs)); - const __m256 d_m = _mm256_broadcast_ss((const float *) (pm + i*bs)); + const __m256 d_v = _mm256_broadcast_ss(&x[i].d); + const __m256 d_m = _mm256_broadcast_ss(&x[i].m); - const uint8_t * restrict pp = pb + i*bs; + const uint8_t * restrict pp = x[i].qs; for (int l = 0; l < QK; l += 32) { // Load 32x4-bit integers into 32x8-bit integers @@ -911,10 +885,10 @@ void dequantize_row_q4_1(const void * restrict x, float * restrict y, int k) { } #else for (int i = 0; i < nb; i++) { - const float d = *(const float *) (pd + i*bs); - const float m = *(const 
float *) (pm + i*bs); + const float d = x[i].d; + const float m = x[i].m; - const uint8_t * restrict pp = pb + i*bs; + const uint8_t * restrict pp = x[i].qs; for (int l = 0; l < QK; l += 2) { const uint8_t vi = pp[l/2]; @@ -1502,25 +1476,15 @@ inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float #if __AVX512F__ && QK == 32 static inline __m512 dot_q4_0_oneblock_avx512( __m512 acc, - const uint8_t * pd0, - const uint8_t * pd1, - const uint8_t * pb0, - const uint8_t * pb1, - size_t bs, + const block_q4_0 * restrict x, + const block_q4_0 * restrict y, int i ) { - const float * d0_0 = (const float *) (pd0 + i*bs); - const float * d1_0 = (const float *) (pd1 + i*bs); - - const uint8_t * restrict p0 = pb0 + (i+0)*bs; - const uint8_t * restrict p1 = pb1 + (i+0)*bs; - // Compute combined scale for the block - float scaleScalar = d0_0[0] * d1_0[0]; - __m512 scale = _mm512_set1_ps( scaleScalar ); + __m512 d = _mm512_set1_ps( x[i].d * y[i].d ); - __m256i bx = bytesFromNibbles( p0 ); - __m256i by = bytesFromNibbles( p1 ); + __m256i bx = bytesFromNibbles( x[i].qs ); + __m256i by = bytesFromNibbles( y[i].qs ); // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. const __m256i off = _mm256_set1_epi8( 8 ); @@ -1536,7 +1500,7 @@ static inline __m512 dot_q4_0_oneblock_avx512( // Convert int32_t to float __m512 p = _mm512_cvtepi32_ps( i64 ); // Apply the scale, and accumulate - return _mm512_fmadd_ps( scale, p, acc ); + return _mm512_fmadd_ps( d, p, acc ); } #endif @@ -1576,19 +1540,14 @@ inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t *s = sumf; } -inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict x, const void * restrict y) { +inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int nb = n / QK; assert(n % QK == 0); assert(nb % 2 == 0); - const size_t bs = sizeof(float) + QK/2; - - const uint8_t * restrict pd0 = ((const uint8_t *)x + 0*bs); - const uint8_t * restrict pd1 = ((const uint8_t *)y + 0*bs); - - const uint8_t * restrict pb0 = ((const uint8_t *)x + 0*bs + sizeof(float)); - const uint8_t * restrict pb1 = ((const uint8_t *)y + 0*bs + sizeof(float)); + const block_q4_0 * restrict x = vx; + const block_q4_0 * restrict y = vy; float sumf = 0.0; @@ -1597,23 +1556,18 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void float sum1 = 0.0f; for (int i = 0; i < nb; i += 2) { - const float d0_0 = *(const float *) (pd0 + i*bs); - const float d1_0 = *(const float *) (pd1 + i*bs); - const float d0_1 = *(const float *) (pd0 + (i + 1)*bs); - const float d1_1 = *(const float *) (pd1 + (i + 1)*bs); - - //printf("d0_0: %f, d1_0: %f, d0_1: %f, d1_1: %f\n", d0_0, d1_0, d0_1, d1_1); - - const uint8_t * restrict p0 = pb0 + i*bs; - const uint8_t * restrict p1 = pb1 + i*bs; + const block_q4_0 * restrict x0 = &x[i + 0]; + const block_q4_0 * restrict y0 = &y[i + 0]; + const block_q4_0 * restrict x1 = &x[i + 1]; + const block_q4_0 * restrict y1 = &y[i + 1]; const uint8x16_t m4b = vdupq_n_u8(0xf); const int8x16_t s8b = vdupq_n_s8(0x8); - const uint8x16_t v0_0 = vld1q_u8(p0); - const uint8x16_t v1_0 = vld1q_u8(p1); - const uint8x16_t v0_1 = vld1q_u8(p0 + bs); - const uint8x16_t v1_1 = vld1q_u8(p1 + bs); + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v1_0 = vld1q_u8(y0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + const uint8x16_t v1_1 = 
vld1q_u8(y1->qs); // 4-bit -> 8-bit const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); @@ -1651,11 +1605,11 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void // scalar #if defined(__ARM_FEATURE_QRDMX) - sum0 += d0_0*d1_0*vaddvq_s32(p_0); - sum1 += d0_1*d1_1*vaddvq_s32(p_1); + sum0 += x0->d * y0->d * vaddvq_s32(p_0); + sum1 += x1->d * y1->d * vaddvq_s32(p_1); #else - sum0 += d0_0*d1_0*(vgetq_lane_s32(p_0, 0) + vgetq_lane_s32(p_0, 1) + vgetq_lane_s32(p_0, 2) + vgetq_lane_s32(p_0, 3)); - sum1 += d0_1*d1_1*(vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3)); + sum0 += x0->d * y0->d * (vgetq_lane_s32(p_0, 0) + vgetq_lane_s32(p_0, 1) + vgetq_lane_s32(p_0, 2) + vgetq_lane_s32(p_0, 3)); + sum1 += x1->d * y1->d * (vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3)); #endif #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls)); @@ -1681,11 +1635,11 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void // scalar #if defined(__ARM_FEATURE_QRDMX) - sum0 += d0_0*d1_0*vaddvq_s16(p_0); - sum1 += d0_1*d1_1*vaddvq_s16(p_1); + sum0 += x0->d * y0->d * vaddvq_s16(p_0); + sum1 += x1->d * y1->d * vaddvq_s16(p_1); #else - sum0 += d0_0*d1_0*(vgetq_lane_s16(p_0, 0) + vgetq_lane_s16(p_0, 1) + vgetq_lane_s16(p_0, 2) + vgetq_lane_s16(p_0, 3) + vgetq_lane_s16(p_0, 4) + vgetq_lane_s16(p_0, 5) + vgetq_lane_s16(p_0, 6) + vgetq_lane_s16(p_0, 7)); - sum1 += d0_1*d1_1*(vgetq_lane_s16(p_1, 0) + vgetq_lane_s16(p_1, 1) + vgetq_lane_s16(p_1, 2) + vgetq_lane_s16(p_1, 3) + vgetq_lane_s16(p_1, 4) + vgetq_lane_s16(p_1, 5) + vgetq_lane_s16(p_1, 6) + vgetq_lane_s16(p_1, 7)); + sum0 += x0->d * y0->d * (vgetq_lane_s16(p_0, 0) + vgetq_lane_s16(p_0, 1) + vgetq_lane_s16(p_0, 2) + vgetq_lane_s16(p_0, 3) + vgetq_lane_s16(p_0, 4) + vgetq_lane_s16(p_0, 5) + vgetq_lane_s16(p_0, 6) + vgetq_lane_s16(p_0, 7)); + sum1 += x1->d * y1->d * (vgetq_lane_s16(p_1, 0) + vgetq_lane_s16(p_1, 1) + vgetq_lane_s16(p_1, 2) + vgetq_lane_s16(p_1, 3) + vgetq_lane_s16(p_1, 4) + vgetq_lane_s16(p_1, 5) + vgetq_lane_s16(p_1, 6) + vgetq_lane_s16(p_1, 7)); #endif #endif } @@ -1703,19 +1657,19 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void for (int superblock_ix = 0; superblock_ix < superblock_count; superblock_ix += 1) { int i = superblock_ix * superblock_size; - acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+0 ); - acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+1 ); - acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+2 ); - acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+3 ); - acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+4 ); - acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+5 ); - acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+6 ); - acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+7 ); + acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+0 ); + acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+1 ); + acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+2 ); + acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+3 ); + acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+4 ); + acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+5 ); + acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+6 ); + acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+7 ); } // Remainders for (int i = superblock_count * superblock_size; i < nb; ++i) { - acc0 = 
dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i ); + acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i ); } // Horizontal sum of all lanes of the accumulator @@ -1726,18 +1680,12 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void // Main loop for (int i = 0; i < nb; ++i) { - const float * d0_0 = (const float *) (pd0 + i*bs); - const float * d1_0 = (const float *) (pd1 + i*bs); - - const uint8_t * restrict p0 = pb0 + i*bs; - const uint8_t * restrict p1 = pb1 + i*bs; - // Compute combined scale for the block - const __m256 scale = _mm256_mul_ps( _mm256_broadcast_ss( d0_0 ), _mm256_broadcast_ss( d1_0 ) ); + const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) ); // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - __m256i bx = bytesFromNibbles( p0 ); - __m256i by = bytesFromNibbles( p1 ); + __m256i bx = bytesFromNibbles( x[i].qs ); + __m256i by = bytesFromNibbles( y[i].qs ); // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. const __m256i off = _mm256_set1_epi8( 8 ); @@ -1759,7 +1707,7 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void // Convert int32_t to float __m256 p = _mm256_cvtepi32_ps( i32 ); // Apply the scale, and accumulate - acc = _mm256_fmadd_ps( scale, p, acc ); + acc = _mm256_fmadd_ps( d, p, acc ); } // Return horizontal sum of the acc vector @@ -1775,21 +1723,18 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void float sum1 = 0.0f; for (int i = 0; i < nb; i += 2) { - const float d0_0 = *(const float *) (pd0 + i*bs); - const float d1_0 = *(const float *) (pd1 + i*bs); - const float d0_1 = *(const float *) (pd0 + (i + 1)*bs); - const float d1_1 = *(const float *) (pd1 + (i + 1)*bs); - - const uint8_t * restrict p0 = pb0 + i*bs; - const uint8_t * restrict p1 = pb1 + i*bs; + const block_q4_0 * restrict x0 = &px[i + 0]; + const block_q4_0 * restrict y0 = &py[i + 0]; + const block_q4_0 * restrict x1 = &px[i + 1]; + const block_q4_0 * restrict y1 = &py[i + 1]; const v128_t m4b = wasm_u8x16_splat(0xf); const v128_t s8b = wasm_i8x16_splat(0x8); - const v128_t v0_0 = wasm_v128_load(p0); - const v128_t v0_1 = wasm_v128_load(p0 + bs); - const v128_t v1_0 = wasm_v128_load(p1); - const v128_t v1_1 = wasm_v128_load(p1 + bs); + const v128_t v0_0 = wasm_v128_load(x0.qs); + const v128_t v0_1 = wasm_v128_load(y0.qs); + const v128_t v1_0 = wasm_v128_load(x1.qs); + const v128_t v1_1 = wasm_v128_load(y1.qs); // 4-bit -> 8-bit const v128_t v0_0l = wasm_v128_and(v0_0, m4b); @@ -1839,12 +1784,12 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void const v128_t p_0 = wasm_i16x8_add(pl_0, ph_0); const v128_t p_1 = wasm_i16x8_add(pl_1, ph_1); - sum0 += d0_0*d1_0*( + sum0 += x0->d * y0->d * ( wasm_i16x8_extract_lane(p_0, 0) + wasm_i16x8_extract_lane(p_0, 1) + wasm_i16x8_extract_lane(p_0, 2) + wasm_i16x8_extract_lane(p_0, 3) + wasm_i16x8_extract_lane(p_0, 4) + wasm_i16x8_extract_lane(p_0, 5) + wasm_i16x8_extract_lane(p_0, 6) + wasm_i16x8_extract_lane(p_0, 7)); - sum1 += d0_1*d1_1*( + sum1 += x1->d * y1->d * ( wasm_i16x8_extract_lane(p_1, 0) + wasm_i16x8_extract_lane(p_1, 1) + wasm_i16x8_extract_lane(p_1, 2) + wasm_i16x8_extract_lane(p_1, 3) + wasm_i16x8_extract_lane(p_1, 4) + wasm_i16x8_extract_lane(p_1, 5) + @@ -1855,11 +1800,11 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void #else // scalar for (int i = 0; i < nb; i++) { - 
const float d0 = *(const float *) (pd0 + i*bs); - const float d1 = *(const float *) (pd1 + i*bs); + const float d0 = x[i].d; + const float d1 = y[i].d; - const uint8_t * restrict p0 = pb0 + i*bs; - const uint8_t * restrict p1 = pb1 + i*bs; + const uint8_t * restrict p0 = x[i].qs; + const uint8_t * restrict p1 = y[i].qs; for (int j = 0; j < QK/2; j++) { const uint8_t v0 = p0[j]; @@ -1879,19 +1824,11 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void *s = sumf; } -inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict x, const void * restrict y) { +inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int nb = n / QK; - const size_t bs = 2*sizeof(float) + QK/2; - - const uint8_t * restrict pd0 = ((const uint8_t *)x + 0*bs); - const uint8_t * restrict pd1 = ((const uint8_t *)y + 0*bs); - - const uint8_t * restrict pm0 = ((const uint8_t *)x + 0*bs + sizeof(float)); - const uint8_t * restrict pm1 = ((const uint8_t *)y + 0*bs + sizeof(float)); - - const uint8_t * restrict pb0 = ((const uint8_t *)x + 0*bs + 2*sizeof(float)); - const uint8_t * restrict pb1 = ((const uint8_t *)y + 0*bs + 2*sizeof(float)); + const block_q4_1 * restrict x = vx; + const block_q4_1 * restrict y = vy; float sumf = 0.0; @@ -1903,21 +1840,17 @@ inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void // Main loop for (int i = 0; i < nb; ++i) { - const float * m0 = (const float *) (pm0 + i*bs); - const float * m1 = (const float *) (pm1 + i*bs); + const float * d0 = &x[i].d; + const float * d1 = &y[i].d; - const float * d0 = (const float *) (pd0 + i*bs); - const float * d1 = (const float *) (pd1 + i*bs); - - const uint8_t * restrict p0 = pb0 + i*bs; - const uint8_t * restrict p1 = pb1 + i*bs; + const float * m0 = &x[i].m; + const float * m1 = &y[i].m; const __m256 d0v = _mm256_broadcast_ss( d0 ); const __m256 d1v = _mm256_broadcast_ss( d1 ); const __m256 m0v = _mm256_broadcast_ss( m0 ); const __m256 m1v = _mm256_broadcast_ss( m1 ); - // Compute combined scale for the block const __m256 scale_01 = _mm256_mul_ps( d0v, d1v ); @@ -1927,8 +1860,8 @@ inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0b10101010 ); // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - __m256i bx = bytesFromNibbles( p0 ); - __m256i by = bytesFromNibbles( p1 ); + __m256i bx = bytesFromNibbles( x[i].qs ); + __m256i by = bytesFromNibbles( y[i].qs ); // Now we have a vector with bytes in [ 0 .. 15 ] interval. 
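
(For reference: the hunks in this patch replace the old per-row pointer arithmetic — pd/pm/pb plus a byte stride bs — with typed blocks accessed as x[i].d, x[i].m and x[i].qs. A minimal sketch of the block layouts this implies, consistent with the 20- and 24-byte sizes asserted later in tests/test-quantize.c and assuming QK == 32, is:

    #include <stdint.h>

    #define QK 32

    typedef struct {
        float   d;          // scaling factor (delta) for the block
        uint8_t qs[QK / 2]; // QK 4-bit quants packed two per byte
    } block_q4_0;           // 4 + 16 = 20 bytes per 32 weights

    typedef struct {
        float   d;          // scaling factor (delta) for the block
        float   m;          // minimum value of the block
        uint8_t qs[QK / 2]; // QK 4-bit quants packed two per byte
    } block_q4_1;           // 4 + 4 + 16 = 24 bytes per 32 weights

This is also why GGML_TYPE_SIZE further down in the patch reduces to sizeof(block_q4_0) and sizeof(block_q4_1).)
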
@@ -1973,14 +1906,14 @@ inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void #else // scalar for (int i = 0; i < nb; i++) { - const float m0 = *(const float *) (pm0 + i*bs); - const float m1 = *(const float *) (pm1 + i*bs); + const float d0 = x[i].d; + const float d1 = y[i].d; - const float d0 = *(const float *) (pd0 + i*bs); - const float d1 = *(const float *) (pd1 + i*bs); + const float m0 = x[i].m; + const float m1 = y[i].m; - const uint8_t * restrict p0 = pb0 + i*bs; - const uint8_t * restrict p1 = pb1 + i*bs; + const uint8_t * restrict p0 = x[i].qs; + const uint8_t * restrict p1 = y[i].qs; for (int j = 0; j < QK/2; j++) { const uint8_t v0 = p0[j]; @@ -2251,8 +2184,8 @@ static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = { static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 5"); static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { - sizeof(float ) + QK/2, - sizeof(float )*2 + QK/2, + sizeof(block_q4_0), + sizeof(block_q4_1), sizeof(int8_t ), sizeof(int16_t), sizeof(int32_t), @@ -10369,64 +10302,50 @@ enum ggml_opt_result ggml_opt( //////////////////////////////////////////////////////////////////////////////// -size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int qk, int64_t * hist) { - const int nb = k / qk; - const size_t bs = (sizeof(float) + sizeof(uint8_t)*qk/2); - const size_t row_size = nb*bs; - - assert(k % qk == 0); - - char * pdst = (char *) dst; +size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) { + assert(k % QK == 0); + const int nb = k / QK; for (int j = 0; j < n; j += k) { - uint8_t * pd = (uint8_t *) (pdst + (j/k)*row_size + 0*bs); - uint8_t * pb = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + sizeof(float)); + block_q4_0 * restrict y = (block_q4_0 *)dst + j/QK; - quantize_row_q4_0_reference(src + j, pd, k); + quantize_row_q4_0_reference(src + j, y, k); for (int i = 0; i < nb; i++) { - for (int l = 0; l < qk; l += 2) { - const uint8_t vi0 = pb[l/2] & 0xF; - const uint8_t vi1 = pb[l/2] >> 4; + for (int l = 0; l < QK; l += 2) { + const uint8_t vi0 = y[i].qs[l/2] & 0xF; + const uint8_t vi1 = y[i].qs[l/2] >> 4; hist[vi0]++; hist[vi1]++; } - pb += bs; } } - return (n/k)*row_size; + return (n/QK*sizeof(block_q4_0)); } -size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int qk, int64_t * hist) { - const int nb = k / qk; - const size_t bs = (2*sizeof(float) + sizeof(uint8_t)*qk/2); - const size_t row_size = nb*bs; - - assert(k % qk == 0); - - char * pdst = (char *) dst; +size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) { + assert(k % QK == 0); + const int nb = k / QK; for (int j = 0; j < n; j += k) { - uint8_t * pd = (uint8_t *) (pdst + (j/k)*row_size + 0*bs); - uint8_t * pb = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + 2*sizeof(float)); + block_q4_1 * restrict y = (block_q4_1 *)dst + j/QK; - quantize_row_q4_1(src + j, pd, k); + quantize_row_q4_1(src + j, y, k); for (int i = 0; i < nb; i++) { - for (int l = 0; l < qk; l += 2) { - const uint8_t vi0 = pb[l/2] & 0xF; - const uint8_t vi1 = pb[l/2] >> 4; + for (int l = 0; l < QK; l += 2) { + const uint8_t vi0 = y[i].qs[l/2] & 0xF; + const uint8_t vi1 = y[i].qs[l/2] >> 4; hist[vi0]++; hist[vi1]++; } - pb += bs; } } - return (n/k)*row_size; + return (n/QK*sizeof(block_q4_1)); } //////////////////////////////////////////////////////////////////////////////// diff --git a/ggml.h b/ggml.h index ddb97318b..335230f9f 100644 --- a/ggml.h +++ b/ggml.h @@ -748,8 +748,8 @@ enum ggml_opt_result 
ggml_opt( // quantization // -size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int qk, int64_t * hist); -size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int qk, int64_t * hist); +size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist); +size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist); // // system info diff --git a/llama.cpp b/llama.cpp index 2bd520353..b0eab2e72 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1345,7 +1345,7 @@ static llama_vocab::id llama_sample_top_p_top_k( // // TODO: reuse code from the llama_model_load() somehow -bool llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, int itype, int qk) { +static bool llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, int itype) { ggml_type type = GGML_TYPE_Q4_1; switch (itype) { @@ -1568,11 +1568,11 @@ bool llama_model_quantize_internal(const std::string & fname_inp, const std::str switch (type) { case GGML_TYPE_Q4_0: { - cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], qk, hist_cur.data()); + cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); } break; case GGML_TYPE_Q4_1: { - cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], qk, hist_cur.data()); + cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); } break; default: { @@ -1711,9 +1711,8 @@ void llama_free(struct llama_context * ctx) { int llama_model_quantize( const char * fname_inp, const char * fname_out, - int itype, - int qk) { - if (!llama_model_quantize_internal(fname_inp, fname_out, itype, qk)) { + int itype) { + if (!llama_model_quantize_internal(fname_inp, fname_out, itype)) { fprintf(stderr, "%s: failed to quantize\n", __func__); return 1; } diff --git a/llama.h b/llama.h index ebf55f41c..d3f4cae61 100644 --- a/llama.h +++ b/llama.h @@ -81,8 +81,7 @@ extern "C" { LLAMA_API int llama_model_quantize( const char * fname_inp, const char * fname_out, - int itype, - int qk); + int itype); // Run the llama inference to obtain the logits and probabilities for the next token. 
// tokens + n_tokens is the provided batch of new tokens to process diff --git a/tests/test-quantize.c b/tests/test-quantize.c index d59ecb8ab..993e9dcc3 100644 --- a/tests/test-quantize.c +++ b/tests/test-quantize.c @@ -13,7 +13,7 @@ int main(void) { src[i] = (float)(i + 1); } - size_t size = ggml_quantize_q4_0(src, dst, QK, QK, QK, hist); + size_t size = ggml_quantize_q4_0(src, dst, QK, QK, hist); assert(size == 20); float max_result = ((float *)dst)[0]; float max_expected = src[31] / ((1 << 3) - 1); @@ -24,7 +24,7 @@ int main(void) { assert(q4_result == q4_expected); } - size = ggml_quantize_q4_1(src, dst, QK, QK, QK, hist); + size = ggml_quantize_q4_1(src, dst, QK, QK, hist); assert(size == 24); float delta_result = ((float *)dst)[0]; float delta_expected = (src[31] - src[0]) / ((1 << 4) - 1); From 20e1e84884376b3fb44ffbfd48d478b2934b0b5e Mon Sep 17 00:00:00 2001 From: Jed Fox Date: Tue, 28 Mar 2023 11:39:01 -0500 Subject: [PATCH 19/76] deploy : add a Package.swift for SwiftPM support (#393) * Add a Package.swift for SwiftPM support * Swap from exclusions to allowlist --- .gitignore | 5 +++++ Package.swift | 20 ++++++++++++++++++++ spm-headers/llama.h | 1 + 3 files changed, 26 insertions(+) create mode 100644 Package.swift create mode 120000 spm-headers/llama.h diff --git a/.gitignore b/.gitignore index 053311fee..741c6b4ea 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ .vscode/ .DS_Store +.build/ build/ build-em/ build-debug/ @@ -27,3 +28,7 @@ compile_commands.json .envrc .direnv/ + +.venv +__pycache__ +.swiftpm diff --git a/Package.swift b/Package.swift new file mode 100644 index 000000000..79d13c82d --- /dev/null +++ b/Package.swift @@ -0,0 +1,20 @@ +// swift-tools-version:5.3 + +import PackageDescription + +let package = Package( + name: "llama", + products: [ + .library(name: "llama", targets: ["llama"]), + ], + targets: [ + .target( + name: "llama", + path: ".", + sources: ["ggml.c", "llama.cpp"], + publicHeadersPath: "spm-headers", + cSettings: [.unsafeFlags(["-Wno-shorten-64-to-32"])] + ), + ], + cxxLanguageStandard: .cxx11 +) diff --git a/spm-headers/llama.h b/spm-headers/llama.h new file mode 120000 index 000000000..9acceb980 --- /dev/null +++ b/spm-headers/llama.h @@ -0,0 +1 @@ +../llama.h \ No newline at end of file From 436e56193199a1625f8c561069f702e8840a9e08 Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Tue, 28 Mar 2023 16:48:20 +0000 Subject: [PATCH 20/76] all : be more strict about converting float to double (#458) * Be more strict about converting float to double * Test equivalence of round, SILU implementations Test module is commented out in CMakeLists.txt because the tests may take a long time, depending on how much the compiler optimizes. 
* Fix softmax in perplexity.cpp * all : prefer float over double where appropriate * perplexity : add --------- Co-authored-by: Georgi Gerganov --- CMakeLists.txt | 5 +- Makefile | 4 + examples/common.cpp | 6 +- examples/main/main.cpp | 11 +-- examples/perplexity/perplexity.cpp | 20 +++-- examples/quantize/quantize.cpp | 4 +- ggml.c | 138 +++++++++++++++-------------- llama.cpp | 52 +++++------ llama.h | 8 +- tests/CMakeLists.txt | 1 + tests/test-double-float.c | 53 +++++++++++ 11 files changed, 185 insertions(+), 117 deletions(-) create mode 100644 tests/test-double-float.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 27a222a16..241be4c15 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -124,17 +124,18 @@ if (LLAMA_ALL_WARNINGS) -Wall -Wextra -Wpedantic - -Wshadow -Wcast-qual + -Wdouble-promotion + -Wshadow -Wstrict-prototypes -Wpointer-arith - -Wno-unused-function ) set(cxx_flags -Wall -Wextra -Wpedantic -Wcast-qual + -Wdouble-promotion ) else() # todo : msvc diff --git a/Makefile b/Makefile index 973b2951b..9cfa89f7a 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,10 @@ CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC LDFLAGS = +# warnings +CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function +CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function + # OS specific # TODO: support Windows ifeq ($(UNAME_S),Linux) diff --git a/examples/common.cpp b/examples/common.cpp index 880ebe9a2..af3ad9eb7 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -215,13 +215,13 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stderr, " prompt file to start generation.\n"); fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict); fprintf(stderr, " --top_k N top-k sampling (default: %d)\n", params.top_k); - fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", params.top_p); + fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", (double)params.top_p); fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n); - fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", params.repeat_penalty); + fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", (double)params.repeat_penalty); fprintf(stderr, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx); fprintf(stderr, " --ignore-eos ignore end of stream token and continue generating\n"); fprintf(stderr, " --memory_f32 use f32 instead of f16 for memory key+value\n"); - fprintf(stderr, " --temp N temperature (default: %.1f)\n", params.temp); + fprintf(stderr, " --temp N temperature (default: %.1f)\n", (double)params.temp); fprintf(stderr, " --n_parts N number of model parts (default: -1 = determine from dimensions)\n"); fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch); fprintf(stderr, " --perplexity compute perplexity over the prompt\n"); diff --git a/examples/main/main.cpp b/examples/main/main.cpp index d5ab2cf75..3130aef0c 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -209,7 +209,8 @@ int main(int argc, char ** argv) { fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str()); } } - fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, 
repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty); + fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", + params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty); fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep); fprintf(stderr, "\n\n"); @@ -274,10 +275,10 @@ int main(int argc, char ** argv) { if ((int) embd_inp.size() <= n_consumed && !is_interacting) { // out of user input, sample next token - const float top_k = params.top_k; - const float top_p = params.top_p; - const float temp = params.temp; - const float repeat_penalty = params.repeat_penalty; + const int32_t top_k = params.top_k; + const float top_p = params.top_p; + const float temp = params.temp; + const float repeat_penalty = params.repeat_penalty; llama_token id = 0; diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 75d526d3d..07ed0a829 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -1,15 +1,17 @@ #include "common.h" #include "llama.h" -std::vector softmax(const std::vector& logits) { - std::vector probs(logits.size()); +#include + +std::vector softmax(const std::vector& logits) { + std::vector probs(logits.size()); float max_logit = logits[0]; for (float v : logits) max_logit = std::max(max_logit, v); double sum_exp = 0.0; for (size_t i = 0; i < logits.size(); i++) { // Subtract the maximum logit value from the current logit value for numerical stability - float logit = logits[i] - max_logit; - double exp_logit = std::exp(logit); + const float logit = logits[i] - max_logit; + const float exp_logit = expf(logit); sum_exp += exp_logit; probs[i] = exp_logit; } @@ -24,14 +26,16 @@ void perplexity(llama_context * ctx, const gpt_params & params) { auto tokens = ::llama_tokenize(ctx, params.prompt, true); int count = 0; - double nll = 0.0; int seq_count = tokens.size() / params.n_ctx; + double nll = 0.0; + fprintf(stderr, "%s : calculating perplexity over %d chunks\n", __func__, seq_count); for (int i = 0; i < seq_count; ++i) { int start = i * params.n_ctx; - int end = start + params.n_ctx - 1; + int end = start + params.n_ctx - 1; // TODO: this is not optimal, e.g. 
it makes the batch 511 instead of 512 + // it is better to always be power of 2 for better performance std::vector embd(tokens.begin() + start, tokens.begin() + end); auto start_t = std::chrono::high_resolution_clock::now(); if (llama_eval(ctx, embd.data(), embd.size(), 0, params.n_threads)) { @@ -40,7 +44,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) { } auto end_t = std::chrono::high_resolution_clock::now(); if (i == 0) { - double seconds = std::chrono::duration(end_t - start_t).count(); + const float seconds = std::chrono::duration(end_t - start_t).count(); printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0)); } // We get the logits for all the tokens in the context window (params.n_ctx) @@ -63,7 +67,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) { std::vector tok_logits( logits + j * n_vocab, logits + (j + 1) * n_vocab); - double prob = softmax(tok_logits)[tokens[start + j + 1]]; + const float prob = softmax(tok_logits)[tokens[start + j + 1]]; nll += -std::log(prob); ++count; } diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index 3888ff587..b444328ac 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -50,8 +50,8 @@ int main(int argc, char ** argv) { const int64_t t_main_end_us = ggml_time_us(); printf("\n"); - printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f); - printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); + printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0); } return 0; diff --git a/ggml.c b/ggml.c index bf8ec8ab2..83395a701 100644 --- a/ggml.c +++ b/ggml.c @@ -150,10 +150,10 @@ typedef double ggml_float; // #include -#define GGML_COMPUTE_FP16_TO_FP32(x) (x) +#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x)) #define GGML_COMPUTE_FP32_TO_FP16(x) (x) -#define GGML_FP16_TO_FP32(x) (x) +#define GGML_FP16_TO_FP32(x) ((float) (x)) #define GGML_FP32_TO_FP16(x) (x) #else @@ -322,7 +322,7 @@ inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { // note: do not use these inside ggml.c // these are meant to be used via the ggml.h API float ggml_fp16_to_fp32(ggml_fp16_t x) { - return GGML_FP16_TO_FP32(x); + return (float) GGML_FP16_TO_FP32(x); } ggml_fp16_t ggml_fp32_to_fp16(float x) { @@ -488,8 +488,8 @@ static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * r const float v0 = x[i*QK + l + 0]*id; const float v1 = x[i*QK + l + 1]*id; - const uint8_t vi0 = ((int8_t) (round(v0))) + 8; - const uint8_t vi1 = ((int8_t) (round(v1))) + 8; + const uint8_t vi0 = (int8_t)roundf(v0) + 8; + const uint8_t vi1 = (int8_t)roundf(v1) + 8; assert(vi0 >= 0 && vi0 < 16); assert(vi1 >= 0 && vi1 < 16); @@ -566,7 +566,7 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int MAX(vgetq_lane_f32(amaxv[0], 2), vgetq_lane_f32(amaxv[0], 3))); const float d = amax / ((1 << 3) - 1); - const float id = d ? 1.0/d : 0.0; + const float id = d ? 
1.0f/d : 0.0f; y[i].d = d; @@ -716,8 +716,8 @@ static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int const float v0 = (x[i*QK + l + 0] - min)*id; const float v1 = (x[i*QK + l + 1] - min)*id; - const uint8_t vi0 = round(v0); - const uint8_t vi1 = round(v1); + const uint8_t vi0 = roundf(v0); + const uint8_t vi1 = roundf(v1); assert(vi0 >= 0 && vi0 < 16); assert(vi1 >= 0 && vi1 < 16); @@ -1001,7 +1001,7 @@ static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, in } \ const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \ const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \ - res = vaddvq_f32(vaddq_f32(t0, t1)); \ + res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ } #define GGML_F16_VEC GGML_F16x8 @@ -1437,9 +1437,8 @@ inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, co inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { - ggml_float sumf = 0.0; - #ifdef GGML_SIMD + float sumf = 0.0f; const int np = (n & ~(GGML_F32_STEP - 1)); GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; @@ -1465,8 +1464,9 @@ inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float } #else // scalar + ggml_float sumf = 0.0; for (int i = 0; i < n; ++i) { - sumf += x[i]*y[i]; + sumf += (ggml_float)(x[i]*y[i]); } #endif @@ -1529,11 +1529,11 @@ inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t // leftovers for (int i = np; i < n; ++i) { - sumf += GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]); + sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); } #else for (int i = 0; i < n; ++i) { - sumf += GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]); + sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); } #endif @@ -1549,7 +1549,7 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void const block_q4_0 * restrict x = vx; const block_q4_0 * restrict y = vy; - float sumf = 0.0; + ggml_float sumf = 0.0; #if defined(__ARM_NEON) float sum0 = 0.0f; @@ -1644,7 +1644,7 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void #endif } - sumf = sum0 + sum1; + sumf = (ggml_float)(sum0 + sum1); #elif defined(__AVX512F__) // Initialize accumulator with zeros __m512 acc0 = _mm512_setzero_ps(); @@ -1972,13 +1972,13 @@ inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * re // leftovers for (int i = np; i < n; ++i) { for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]); + sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); } } #else for (int i = 0; i < n; ++i) { for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]); + sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); } } #endif @@ -2049,19 +2049,19 @@ inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #endif } -inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrt(*s); } +inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); } inline static void ggml_vec_sqr_f32 (const int n, float * y, 
const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } -inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrt(x[i]); } +inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); } inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } -static const ggml_float GELU_COEF_A = 0.044715; -static const ggml_float SQRT_2_OVER_PI = 0.79788456080286535587989211986876; +static const float GELU_COEF_A = 0.044715f; +static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; inline static float ggml_gelu_f32(float x) { - return 0.5*x*(1.0 + tanh(SQRT_2_OVER_PI*x*(1.0 + GELU_COEF_A*x*x))); + return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { @@ -2090,7 +2090,7 @@ inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { // Sigmoid Linear Unit (SiLU) function inline static float ggml_silu_f32(float x) { - return x/(1.0 + exp(-x)); + return x/(1.0f + expf(-x)); } inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { @@ -2121,7 +2121,7 @@ inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE ggml_float sum = 0.0; for (int i = 0; i < n; ++i) { - sum += x[i]; + sum += (ggml_float)x[i]; } *s = sum; #else @@ -2131,7 +2131,7 @@ inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE - ggml_float max = -INFINITY; + float max = -INFINITY; for (int i = 0; i < n; ++i) { max = MAX(max, x[i]); } @@ -2141,7 +2141,10 @@ inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { #endif } -inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) { ggml_vec_norm_f32(n, s, x); *s = 1./(*s); } +inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) { + ggml_vec_norm_f32(n, s, x); + *s = 1.f/(*s); +} // // logging @@ -2540,7 +2543,7 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii); table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f)); - table_exp_f16[i] = GGML_FP32_TO_FP16(exp(f)); + table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f)); } const uint64_t t_end = ggml_time_us(); UNUSED(t_end); @@ -5583,7 +5586,7 @@ static void ggml_compute_forward_norm_f32( const size_t nb2 = dst->nb[2]; const size_t nb3 = dst->nb[3]; - const ggml_float eps = 1e-5f; // TODO: make this a parameter + const float eps = 1e-5f; // TODO: make this a parameter // TODO: optimize for (int i03 = 0; i03 < ne03; i03++) { @@ -5591,23 +5594,24 @@ static void ggml_compute_forward_norm_f32( for (int i01 = ith; i01 < 
ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - ggml_float mean = 0.0; + ggml_float sum = 0.0; for (int i00 = 0; i00 < ne00; i00++) { - mean += x[i00]; + sum += (ggml_float)x[i00]; } - mean /= ne00; + float mean = sum/ne00; float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); ggml_float sum2 = 0.0; for (int i00 = 0; i00 < ne00; i00++) { - ggml_float v = x[i00] - mean; + float v = x[i00] - mean; y[i00] = v; - sum2 += v*v; + sum2 += (ggml_float)(v*v); } - const float scale = 1.0/sqrt(sum2/ne00 + eps); + float variance = sum2/ne00; + const float scale = 1.0f/sqrtf(variance + eps); ggml_vec_scale_f32(ne00, y, scale); } @@ -5665,7 +5669,7 @@ static void ggml_compute_forward_rms_norm_f32( const size_t nb2 = dst->nb[2]; const size_t nb3 = dst->nb[3]; - const ggml_float eps = 1e-6f; // TODO: make this a parameter + const float eps = 1e-6f; // TODO: make this a parameter // TODO: optimize for (int i03 = 0; i03 < ne03; i03++) { @@ -5673,12 +5677,12 @@ static void ggml_compute_forward_rms_norm_f32( for (int i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - ggml_float mean = 0.0; + ggml_float sum = 0.0; for (int i00 = 0; i00 < ne00; i00++) { - mean += x[i00] * x[i00]; + sum += (ggml_float)(x[i00] * x[i00]); } - mean /= ne00; + float mean = sum/ne00; float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); @@ -5687,7 +5691,7 @@ static void ggml_compute_forward_rms_norm_f32( // y[i00] = x[i00]; // } - const float scale = 1.0/sqrt(mean + eps); + const float scale = 1.0f/sqrtf(mean + eps); ggml_vec_scale_f32(ne00, y, scale); } @@ -6913,12 +6917,12 @@ static void ggml_compute_forward_soft_max_f32( ggml_fp16_t s = GGML_FP32_TO_FP16(p[i] - max); memcpy(&scvt, &s, sizeof(scvt)); const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); - sum += val; + sum += (ggml_float)val; p[i] = val; } } - assert(sum > 0.0f); + assert(sum > 0.0); sum = 1.0/sum; ggml_vec_scale_f32(nc, p, sum); @@ -6994,16 +6998,16 @@ static void ggml_compute_forward_rope_f32( const int p = (mode == 0 ? n_past + i2 : i2); for (int i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < n_dims; i0 += 2) { - const double theta = pow(10000.0, ((double)-i0)/n_dims); + const float theta = powf(10000.0, ((float)-i0)/n_dims); - const double cos_theta = cos(p*theta); - const double sin_theta = sin(p*theta); + const float cos_theta = cosf(p*theta); + const float sin_theta = sinf(p*theta); const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - double x0 = src[0]; - double x1 = src[1]; + const float x0 = src[0]; + const float x1 = src[1]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[1] = x0*sin_theta + x1*cos_theta; @@ -7050,16 +7054,16 @@ static void ggml_compute_forward_rope_f16( const int p = (mode == 0 ? 
n_past + i2 : i2); for (int i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < n_dims; i0 += 2) { - const double theta = pow(10000.0, ((double)-i0)/n_dims); + const float theta = powf(10000.0, ((float)-i0)/n_dims); - const double cos_theta = cos(p*theta); - const double sin_theta = sin(p*theta); + const float cos_theta = cosf(p*theta); + const float sin_theta = sinf(p*theta); const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - double x0 = ggml_fp16_to_fp32(src[0]); - double x1 = ggml_fp16_to_fp32(src[1]); + const float x0 = ggml_fp16_to_fp32(src[0]); + const float x1 = ggml_fp16_to_fp32(src[1]); dst_data[0] = ggml_fp32_to_fp16(x0*cos_theta - x1*sin_theta); dst_data[1] = ggml_fp32_to_fp16(x0*sin_theta + x1*cos_theta); @@ -7735,7 +7739,7 @@ static void ggml_compute_forward_flash_attn_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - const float scale = 1.0/sqrt((double) D); + const float scale = 1.0f/sqrtf(D); //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); @@ -7782,7 +7786,7 @@ static void ggml_compute_forward_flash_attn_f32( float max = -INFINITY; ggml_vec_max_f32(M, &max, S); - float sum = 0.0f; + ggml_float sum = 0.0; { #ifdef GGML_SOFT_MAX_ACCELERATE max = -max; @@ -7803,7 +7807,7 @@ static void ggml_compute_forward_flash_attn_f32( ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); - sump[j] += val; + sump[j] += (ggml_float)val; SS[j] = val; } } @@ -7815,7 +7819,7 @@ static void ggml_compute_forward_flash_attn_f32( #endif } - assert(sum > 0.0f); + assert(sum > 0.0); sum = 1.0/sum; ggml_vec_scale_f32(M, S, sum); @@ -7944,7 +7948,7 @@ static void ggml_compute_forward_flash_attn_f16( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - const float scale = 1.0/sqrt((double) D); + const float scale = 1.0f/sqrtf(D); //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); @@ -8008,7 +8012,7 @@ static void ggml_compute_forward_flash_attn_f16( float max = -INFINITY; ggml_vec_max_f32(M, &max, S); - float sum = 0.0f; + ggml_float sum = 0.0; { #ifdef GGML_SOFT_MAX_ACCELERATE max = -max; @@ -8029,7 +8033,7 @@ static void ggml_compute_forward_flash_attn_f16( ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); - sump[j] += val; + sump[j] += (ggml_float)val; SS[j] = val; } } @@ -8041,7 +8045,7 @@ static void ggml_compute_forward_flash_attn_f16( #endif } - assert(sum > 0.0f); + assert(sum > 0.0); sum = 1.0/sum; ggml_vec_scale_f32(M, S, sum); @@ -9566,7 +9570,7 @@ label=\"%d [%d, %d] | %s", fprintf(fp, " \"%p\" [ \ style = filled; fillcolor = %s; shape = record; \ label=\"%.1e\"; ]\n", - (void *) node, color, ggml_get_f32_1d(node, 0)); + (void *) node, color, (double)ggml_get_f32_1d(node, 0)); } else { fprintf(fp, " \"%p\" [ \ style = filled; fillcolor = %s; shape = record; \ @@ -9804,7 +9808,7 @@ static enum ggml_opt_result ggml_opt_adam( if (params.past <= t) { const float rate = (pf[t%params.past] - fx)/fx; - if (fabs(rate) < params.delta) { + if (fabsf(rate) < params.delta) { return GGML_OPT_OK; } } @@ -9883,7 +9887,7 @@ static enum ggml_opt_result linesearch_backtracking( const float dec = 0.5f; const float inc = 2.1f; - if (*step <= 0.) 
{ + if (*step <= 0.f) { return GGML_LINESEARCH_INVALID_PARAMETERS; } @@ -9971,7 +9975,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( struct ggml_cgraph * gb) { if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE || params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) { - if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1. <= params.lbfgs.wolfe) { + if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) { return GGML_OPT_INVALID_WOLFE; } } @@ -10092,8 +10096,8 @@ static enum ggml_opt_result ggml_opt_lbfgs( GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0)); - if (xnorm < 1.0) { - xnorm = 1.0; + if (xnorm < 1.0f) { + xnorm = 1.0f; } if (gnorm/xnorm <= params.lbfgs.eps) { // converged @@ -10106,7 +10110,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( if (params.past <= k) { const float rate = (pf[k%params.past] - fx)/fx; - if (fabs(rate) < params.delta) { + if (fabsf(rate) < params.delta) { return GGML_OPT_OK; } } diff --git a/llama.cpp b/llama.cpp index b0eab2e72..ee7eb8ea7 100644 --- a/llama.cpp +++ b/llama.cpp @@ -779,8 +779,8 @@ static bool llama_model_load( // progress if (progress_callback) { - double current_file_progress = double(size_t(fin.tellg()) - file_offset) / double(file_size - file_offset); - double current_progress = (double(i) + current_file_progress) / double(n_parts); + float current_file_progress = float(size_t(fin.tellg()) - file_offset) / float(file_size - file_offset); + float current_progress = (float(i) + current_file_progress) / float(n_parts); progress_callback(current_progress, progress_callback_user_data); } if (model.n_loaded % 8 == 0) { @@ -922,7 +922,7 @@ static bool llama_eval_internal( struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, - ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))); + ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head))); // KQ_masked = mask_past(KQ_scaled) struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); @@ -1240,12 +1240,12 @@ static std::vector llama_tokenize(const llama_vocab & vocab, co // sampling // -static void sample_top_k(std::vector> & logits_id, int top_k) { +static void sample_top_k(std::vector> & logits_id, int top_k) { // find the top k tokens std::partial_sort( logits_id.begin(), logits_id.begin() + top_k, logits_id.end(), - [](const std::pair & a, const std::pair & b) { + [](const std::pair & a, const std::pair & b) { return a.first > b.first; }); @@ -1256,9 +1256,9 @@ static llama_vocab::id llama_sample_top_p_top_k( llama_context & lctx, const std::vector & last_n_tokens, int top_k, - double top_p, - double temp, - double repeat_penalty) { + float top_p, + float temp, + float repeat_penalty) { auto & rng = lctx.rng; const int n_logits = lctx.model.hparams.n_vocab; @@ -1266,17 +1266,17 @@ static llama_vocab::id llama_sample_top_p_top_k( const auto & logits = lctx.logits; const auto * plogits = logits.data() + logits.size() - n_logits; - std::vector> logits_id; + std::vector> logits_id; logits_id.reserve(n_logits); { - const double scale = 1.0/temp; + const float scale = 1.0f/temp; for (int i = 0; i < n_logits; ++i) { // repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858) // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main if (std::find(last_n_tokens.begin(), last_n_tokens.end(), i) != last_n_tokens.end()) { // if score < 0 then repetition penalty has to multiplied to reduce the previous token probability - if (plogits[i] < 0.0) { + if (plogits[i] < 0.0f) { 
logits_id.push_back(std::make_pair(plogits[i]*scale*repeat_penalty, i)); } else { logits_id.push_back(std::make_pair(plogits[i]*scale/repeat_penalty, i)); @@ -1289,18 +1289,18 @@ static llama_vocab::id llama_sample_top_p_top_k( sample_top_k(logits_id, top_k); - double maxl = -std::numeric_limits::infinity(); + float maxl = -std::numeric_limits::infinity(); for (const auto & kv : logits_id) { maxl = std::max(maxl, kv.first); } // compute probs for the top k tokens - std::vector probs; + std::vector probs; probs.reserve(logits_id.size()); double sum = 0.0; for (const auto & kv : logits_id) { - double p = exp(kv.first - maxl); + const float p = expf(kv.first - maxl); probs.push_back(p); sum += p; } @@ -1310,8 +1310,8 @@ static llama_vocab::id llama_sample_top_p_top_k( p /= sum; } - if (top_p < 1.0f) { - double cumsum = 0.0f; + if (top_p < 1.0) { + double cumsum = 0.0; for (int i = 0; i < (int) probs.size(); i++) { cumsum += probs[i]; if (cumsum >= top_p) { @@ -1590,7 +1590,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s } for (int i = 0; i < (int) hist_cur.size(); ++i) { - printf("%5.3f ", hist_cur[i] / (float)nelements); + printf("%5.3f ", hist_cur[i] / float(nelements)); } printf("\n"); } else { @@ -1613,7 +1613,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s printf("%s: hist: ", __func__); for (int i = 0; i < (int) hist_all.size(); ++i) { - printf("%5.3f ", hist_all[i] / (float)sum_all); + printf("%5.3f ", hist_all[i] / float(sum_all)); } printf("\n"); } @@ -1795,9 +1795,9 @@ llama_token llama_sample_top_p_top_k( const llama_token * last_n_tokens_data, int last_n_tokens_size, int top_k, - double top_p, - double temp, - double repeat_penalty) { + float top_p, + float temp, + float repeat_penalty) { const int64_t t_start_sample_us = ggml_time_us(); llama_token result = 0; @@ -1828,11 +1828,11 @@ void llama_print_timings(struct llama_context * ctx) { const int32_t n_p_eval = std::max(1, ctx->n_p_eval); fprintf(stderr, "\n"); - fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0f); - fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->t_sample_us, n_sample, 1e-3f * ctx->t_sample_us / n_sample); - fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token)\n", __func__, 1e-3f * ctx->t_p_eval_us, n_p_eval, 1e-3f * ctx->t_p_eval_us / n_p_eval); - fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->t_eval_us, n_eval, 1e-3f * ctx->t_eval_us / n_eval); - fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0f); + fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0); + fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3 * ctx->t_sample_us, n_sample, 1e-3 * ctx->t_sample_us / n_sample); + fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_p_eval_us, n_p_eval, 1e-3 * ctx->t_p_eval_us / n_p_eval); + fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3 * ctx->t_eval_us, n_eval, 1e-3 * ctx->t_eval_us / n_eval); + fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0); } void llama_reset_timings(struct llama_context * ctx) { diff --git a/llama.h b/llama.h index d3f4cae61..f5a576c1e 100644 --- a/llama.h +++ b/llama.h @@ -45,7 +45,7 @@ extern "C" { } 
llama_token_data; - typedef void (*llama_progress_callback)(double progress, void *ctx); + typedef void (*llama_progress_callback)(float progress, void *ctx); struct llama_context_params { int n_ctx; // text context @@ -134,9 +134,9 @@ extern "C" { const llama_token * last_n_tokens_data, int last_n_tokens_size, int top_k, - double top_p, - double temp, - double repeat_penalty); + float top_p, + float temp, + float repeat_penalty); // Performance information LLAMA_API void llama_print_timings(struct llama_context * ctx); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index b44d7fe7e..157d7336e 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -5,5 +5,6 @@ function(llama_add_test source) add_test(NAME ${TEST_TARGET} COMMAND $ ${ARGN}) endfunction() +# llama_add_test(test-double-float.c) # SLOW llama_add_test(test-quantize.c) llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin) diff --git a/tests/test-double-float.c b/tests/test-double-float.c new file mode 100644 index 000000000..89dafc9f2 --- /dev/null +++ b/tests/test-double-float.c @@ -0,0 +1,53 @@ +// These tests may take a long time! +// They are to prove that conversion from double to float of various functions in ggml.c doesn't affect the result. +// This is done by checking all finite (non-NaN, non-infinite) floats. + +#undef NDEBUG +#include +#include +#include +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdouble-promotion" + +// ggml.c::quantize_row_q4_0_reference +inline static uint8_t round_orig(float v0) { return ((int8_t) (round(v0))) + 8; } + +// ggml.c::ggml_silu_f32 +inline static float silu_orig(float x) { + return x/(1.0 + exp(-x)); +} + +#pragma GCC diagnostic pop + +// ggml.c::quantize_row_q4_0_reference +inline static uint8_t round_float(float v0) { return (int8_t)roundf(v0) + 8; } + +// ggml.c::ggml_silu_f32 +inline static float silu_float(float x) { + return x/(1.0f + expf(-x)); +} + +int main(void) { + uint32_t x = UINT32_MAX; + do { + float f = *(float *)&x; + assert(!isfinite(f) || (round_orig(f) == round_float(f))); + } while (x--); + +#ifdef __F16C__ + // GELU and SILU implementations are used with a FP16 lookup table. + // The original and float-only results are not equal for all inputs after converting to FP16. + // GELU is an approximation anyway (tanh), not tested here. + // For SILU, verify that the results are at least the closest floating point numbers, if the FP16 values don't match. 
+ for (x = 0; x <= UINT16_MAX; x++) { + float f = _cvtsh_ss(x); + const float so = silu_orig(f); + const float sf = silu_float(f); + assert( (_cvtss_sh(so, 0) == _cvtss_sh(sf, 0)) + || (nextafterf(so, sf) == sf) + || (nextafterf(sf, so) == so)); + } +#endif +} From d502bc7c9d9d6dfb3a09aea404395b666d7b374d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 28 Mar 2023 19:51:55 +0300 Subject: [PATCH 21/76] tests : free llama context at the end of the test --- CMakeLists.txt | 3 ++- tests/test-tokenizer-0.cpp | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 241be4c15..d7b0eba29 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -129,13 +129,14 @@ if (LLAMA_ALL_WARNINGS) -Wshadow -Wstrict-prototypes -Wpointer-arith + -Wno-unused-function ) set(cxx_flags -Wall -Wextra -Wpedantic -Wcast-qual - -Wdouble-promotion + -Wno-unused-function ) else() # todo : msvc diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp index 382055324..55b086dae 100644 --- a/tests/test-tokenizer-0.cpp +++ b/tests/test-tokenizer-0.cpp @@ -77,5 +77,7 @@ int main(int argc, char **argv) { } } + llama_free(ctx); + return 0; } From 96f9c0506fa81cada6f96f45768c34f45406c4bb Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 28 Mar 2023 20:01:09 +0300 Subject: [PATCH 22/76] ci : make ctest verbose, hopefully we see what is wrong with the sanitizer --- .github/workflows/build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 26b451943..cb35a3298 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -62,7 +62,7 @@ jobs: id: cmake_test run: | cd build - ctest --output-on-failure + ctest --verbose ubuntu-latest-cmake-sanitizer: runs-on: ubuntu-latest @@ -98,7 +98,7 @@ jobs: id: cmake_test run: | cd build - ctest --output-on-failure + ctest --verbose macOS-latest-make: runs-on: macos-latest @@ -143,7 +143,7 @@ jobs: id: cmake_test run: | cd build - ctest --output-on-failure + ctest --verbose windows-latest-cmake: runs-on: windows-latest @@ -185,7 +185,7 @@ jobs: if: ${{ matrix.build != 'avx512' || env.HAS_AVX512F == '1' }} # Test AVX-512 only when possible run: | cd build - ctest -C Release --output-on-failure + ctest -C Release --verbose - name: Get commit hash id: commit From 692ce3164ef1201ecb9cfad315cc0a08b965adb8 Mon Sep 17 00:00:00 2001 From: "DooWoong Lee (David)" Date: Wed, 29 Mar 2023 02:02:34 +0900 Subject: [PATCH 23/76] py : removed unused `model` variable and verified that the code functions correctly with `vocab_only` setting. Also confirmed that the code works as expected after running with reduced memory usage due to deletion of no-longer-needed variable. (#547) --- convert-pth-to-ggml.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py index ccf2c57b1..d83f8a137 100644 --- a/convert-pth-to-ggml.py +++ b/convert-pth-to-ggml.py @@ -145,13 +145,11 @@ def main(): print(f"Extracting only the vocab from '{fname_model}'\n") - model = torch.load(fname_model, map_location="cpu") with open(fname_out, "wb") as fout: write_header(fout, hparams, ftype) write_tokens(fout, tokenizer) - del model print(f"Done. 
Output file: {fname_out}\n") From 99c5b2765422232ebb4414f5a63693d734406a7f Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Tue, 28 Mar 2023 17:13:01 +0000 Subject: [PATCH 24/76] ggml : refactor quantized processing functions (#509) * Refactor quantized processing functions * ggml : minor --------- Co-authored-by: Georgi Gerganov --- ggml.c | 307 ++++++++++----------------------------------------------- 1 file changed, 53 insertions(+), 254 deletions(-) diff --git a/ggml.c b/ggml.c index 83395a701..ea7277895 100644 --- a/ggml.c +++ b/ggml.c @@ -1540,7 +1540,7 @@ inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t *s = sumf; } -inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { +static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int nb = n / QK; assert(n % QK == 0); @@ -1824,7 +1824,7 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void *s = sumf; } -inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { +static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int nb = n / QK; const block_q4_1 * restrict x = vx; @@ -6106,7 +6106,30 @@ static void ggml_compute_forward_mul_mat_f16_f32( //} } -static void ggml_compute_forward_mul_mat_q4_0_f32( +typedef void (*dequantize_row_q_t)(const void * restrict x, float * restrict y, int k); +typedef void (*quantize_row_q_t)(const float * restrict x, void * restrict y, int k); +typedef void (*vec_dot_q_t)(const int n, float * restrict s, const void * restrict x, const void * restrict y); + +typedef struct { + dequantize_row_q_t dequantize_row_q; + quantize_row_q_t quantize_row_q; + vec_dot_q_t vec_dot_q; +} quantize_fns_t; + +static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = { + [GGML_TYPE_Q4_0] = { + .dequantize_row_q = dequantize_row_q4_0, + .quantize_row_q = quantize_row_q4_0, + .vec_dot_q = ggml_vec_dot_q4_0, + }, + [GGML_TYPE_Q4_1] = { + .dequantize_row_q = dequantize_row_q4_1, + .quantize_row_q = quantize_row_q4_1, + .vec_dot_q = ggml_vec_dot_q4_1, + }, +}; + +static void ggml_compute_forward_mul_mat_q_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, @@ -6152,8 +6175,12 @@ static void ggml_compute_forward_mul_mat_q4_0_f32( GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); + const enum ggml_type type = src0->type; + quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q; + vec_dot_q_t const vec_dot_q = quantize_fns[type].vec_dot_q; + // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_0]); + GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]); GGML_ASSERT(nb10 == sizeof(float)); // dst cannot be transposed or permuted @@ -6185,194 +6212,14 @@ static void ggml_compute_forward_mul_mat_q4_0_f32( } float * const wdata = params->wdata; + dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { { size_t id = 0; for (int i01 = 0; i01 < ne01; ++i01) { - dequantize_row_q4_0((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00); - id += ne00; - } - } - - const float * x = wdata; - const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13); - - float * d = 
(float *) ((char *) dst->data + i02*nb2 + i03*nb3); - - // zT = y * xT - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ne11, ne01, ne10, - 1.0f, y, ne10, - x, ne10, - 0.0f, d, ne01); - } - } - - /*printf("CBLAS Q4_0 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/ - - return; - } -#endif - - if (params->type == GGML_TASK_INIT) { - char * wdata = params->wdata; - - for (int i13 = 0; i13 < ne13; ++i13) { - for (int i12 = 0; i12 < ne12; ++i12) { - for (int i11 = 0; i11 < ne11; ++i11) { - quantize_row_q4_0((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10); - wdata += (ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]; - } - } - } - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // parallelize by src0 rows using ggml_vec_dot_q4_0 - - // total rows in src0 - const int nr = ne01*ne02*ne03; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - void * wdata = params->wdata; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir/(ne02*ne01); - const int i02 = (ir - i03*ne02*ne01)/ne01; - const int i01 = (ir - i03*ne02*ne01 - i02*ne01); - - const int i13 = i03; - const int i12 = i02; - - const int i0 = i01; - const int i2 = i02; - const int i3 = i03; - - void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); - char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]); - - float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); - - assert(ne00 % 32 == 0); - - for (int ic = 0; ic < ne11; ++ic) { - ggml_vec_dot_q4_0(ne00, &dst_col[ic*ne0], src0_row, ((void *) (src1_col + (ic*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]))); - } - } - - //int64_t t1 = ggml_time_us(); - //static int64_t acc = 0; - //acc += t1 - t0; - //if (t1 - t0 > 10) { - // printf("\n"); - // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); - // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); - // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); - - // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); - //} -} - -static void ggml_compute_forward_mul_mat_q4_1_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; - - const int ne10 = src1->ne[0]; - const int ne11 = src1->ne[1]; - const int ne12 = src1->ne[2]; - const int ne13 = src1->ne[3]; - - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - const int ne2 = dst->ne[2]; - const int ne3 = dst->ne[3]; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - const int nb12 = src1->nb[2]; - const int nb13 = src1->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = 
params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_1]); - GGML_ASSERT(nb10 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - if (params->ith != 0) { - return; - } - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - float * const wdata = params->wdata; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - { - size_t id = 0; - for (int i01 = 0; i01 < ne01; ++i01) { - dequantize_row_q4_1((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00); + dequantize_row_q((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00); id += ne00; } } @@ -6399,15 +6246,13 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( if (params->type == GGML_TASK_INIT) { char * wdata = params->wdata; + const size_t row_size = ne10*GGML_TYPE_SIZE[type]/GGML_BLCK_SIZE[type]; for (int i13 = 0; i13 < ne13; ++i13) { for (int i12 = 0; i12 < ne12; ++i12) { for (int i11 = 0; i11 < ne11; ++i11) { - //for (int i10 = 0; i10 < ne10; ++i10) { - // wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); - //} - quantize_row_q4_1((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10); - wdata += (ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]; + quantize_row_q((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10); + wdata += row_size; } } } @@ -6419,7 +6264,7 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( return; } - // parallelize by src0 rows using ggml_vec_dot_q4_1 + // parallelize by src0 rows using ggml_vec_dot_q // total rows in src0 const int nr = ne01*ne02*ne03; @@ -6432,6 +6277,7 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( const int ir1 = MIN(ir0 + dr, nr); void * wdata = params->wdata; + const size_t row_size = ne00*GGML_TYPE_SIZE[type]/GGML_BLCK_SIZE[type]; for (int ir = ir0; ir < ir1; ++ir) { // src0 indices @@ -6447,14 +6293,14 @@ static void ggml_compute_forward_mul_mat_q4_1_f32( const int i3 = i03; void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); - char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]); + char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*row_size)); float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); assert(ne00 % 32 == 0); for (int ic = 0; ic < ne11; ++ic) { - ggml_vec_dot_q4_1(ne00, &dst_col[ic*ne0], src0_row, ((void *) (src1_col + (ic*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]))); + vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size)); } } @@ -6478,12 +6324,9 @@ static void ggml_compute_forward_mul_mat( struct ggml_tensor * dst) { switch (src0->type) 
{ case GGML_TYPE_Q4_0: - { - ggml_compute_forward_mul_mat_q4_0_f32(params, src0, src1, dst); - } break; case GGML_TYPE_Q4_1: { - ggml_compute_forward_mul_mat_q4_1_f32(params, src0, src1, dst); + ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst); } break; case GGML_TYPE_F16: { @@ -6644,7 +6487,7 @@ static void ggml_compute_forward_transpose( // ggml_compute_forward_get_rows -static void ggml_compute_forward_get_rows_q4_0( +static void ggml_compute_forward_get_rows_q( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, @@ -6657,42 +6500,17 @@ static void ggml_compute_forward_get_rows_q4_0( const int nc = src0->ne[0]; const int nr = ggml_nelements(src1); + const enum ggml_type type = src0->type; + dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q; assert( dst->ne[0] == nc); assert( dst->ne[1] == nr); - assert(src0->nb[0] == GGML_TYPE_SIZE[GGML_TYPE_Q4_0]); + assert(src0->nb[0] == GGML_TYPE_SIZE[type]); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; - dequantize_row_q4_0( - (const void *) ((char *) src0->data + r*src0->nb[1]), - (float *) ((char *) dst->data + i*dst->nb[1]), nc); - } -} - -static void ggml_compute_forward_get_rows_q4_1( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - assert( dst->ne[0] == nc); - assert( dst->ne[1] == nr); - assert(src0->nb[0] == GGML_TYPE_SIZE[GGML_TYPE_Q4_1]); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - dequantize_row_q4_1( + dequantize_row_q( (const void *) ((char *) src0->data + r*src0->nb[1]), (float *) ((char *) dst->data + i*dst->nb[1]), nc); } @@ -6760,12 +6578,9 @@ static void ggml_compute_forward_get_rows( struct ggml_tensor * dst) { switch (src0->type) { case GGML_TYPE_Q4_0: - { - ggml_compute_forward_get_rows_q4_0(params, src0, src1, dst); - } break; case GGML_TYPE_Q4_1: { - ggml_compute_forward_get_rows_q4_1(params, src0, src1, dst); + ggml_compute_forward_get_rows_q(params, src0, src1, dst); } break; case GGML_TYPE_F16: { @@ -9098,8 +8913,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) size_t cur = 0; - if (node->src0->type == GGML_TYPE_F16 && - node->src1->type == GGML_TYPE_F32) { + if (node->src0->type == GGML_TYPE_F16 && node->src1->type == GGML_TYPE_F32) { #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { node->n_tasks = 1; // TODO: this actually is doing nothing @@ -9114,33 +8928,18 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) #else cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1); #endif - } else if (node->src0->type == GGML_TYPE_F32 && - node->src1->type == GGML_TYPE_F32) { + } else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) { cur = 0; - } else if (node->src0->type == GGML_TYPE_Q4_0 && - node->src1->type == GGML_TYPE_F32) { + } else if (quantize_fns[node->src0->type].vec_dot_q && node->src1->type == GGML_TYPE_F32) { #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { node->n_tasks = 1; cur = 
GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]); - } else { - cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_0]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]; - } -#else - cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_0]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]; + } else #endif - } else if (node->src0->type == GGML_TYPE_Q4_1 && - node->src1->type == GGML_TYPE_F32) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { - node->n_tasks = 1; - cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]); - } else { - cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_1]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]; + { + cur = GGML_TYPE_SIZE[node->src0->type]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[node->src0->type]; } -#else - cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_1]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]; -#endif } else { GGML_ASSERT(false); } From d0330fd783d7c67349cdcce4a56604ef0aeccdb5 Mon Sep 17 00:00:00 2001 From: Tai Duc Nguyen Date: Tue, 28 Mar 2023 13:51:29 -0400 Subject: [PATCH 25/76] py : add capabiliy to convert from ggml back to torch or hf format for further consumption/training/finetuning (#403) --- convert_ggml_to_pth.py | 294 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 294 insertions(+) create mode 100644 convert_ggml_to_pth.py diff --git a/convert_ggml_to_pth.py b/convert_ggml_to_pth.py new file mode 100644 index 000000000..20158c9ca --- /dev/null +++ b/convert_ggml_to_pth.py @@ -0,0 +1,294 @@ +# Author: github.com/ductai199x +import argparse +import os +import struct + +import numpy as np +import torch +from numba import njit +from tqdm.auto import tqdm + + +def read_header(fin): + values = struct.unpack("i" * 9, fin.read(4 * 9)) + _, _, vocab_size, dim, multiple_of, n_heads, n_layers, rot, ftype = values + return { + "vocab_size": vocab_size, + "dim": dim, + "multiple_of": multiple_of, + "n_heads": n_heads, + "n_layers": n_layers, + }, ftype + + +def read_tokens(fin, vocab_size): + tokens = [] + for _ in range(vocab_size): + text_len = struct.unpack("i", fin.read(4))[0] + text_bytes = fin.read(text_len) + try: + text = text_bytes.decode("utf-8") + except UnicodeDecodeError: + text = text_bytes.decode("utf-8", "replace") + score = struct.unpack("f", fin.read(4))[0] + tokens.append((text, score)) + return tokens + + +@njit +def dequantize_weights_numba(fin_data, n_rows, n_cols): + qk = 32 + nb = n_cols // qk + bs = 4 + (qk // 2) + + weights = np.zeros((n_rows, n_cols), dtype=np.float32) + data_pos = 0 + + for row in range(n_rows): + for block in range(nb): + d = np.frombuffer(fin_data[data_pos : data_pos + 4], dtype=np.float32)[0] + data_pos += 4 + packed_values = fin_data[data_pos : data_pos + (qk // 2)] + data_pos += qk // 2 + + for i in range(qk // 2): + packed_value = packed_values[i] + v0 = np.float32((packed_value & 0b00001111) - 8) * d + v1 = np.float32((packed_value >> 4) - 8) * d + + weights[row, block * qk + 2 * i] = v0 + weights[row, block * qk + 2 * i + 1] = v1 + + return weights + + +def dequantize_weights(fin, n_rows, n_cols): + qk = 32 + nb = n_cols // qk + data_size = n_rows * n_cols // 2 + n_rows * nb * 4 + fin_data = fin.read(data_size) + return dequantize_weights_numba(fin_data, n_rows, n_cols) + + +def read_variables(fin): + model = {} + pbar = tqdm(total=os.path.getsize(fin.name), unit="B", unit_scale=True, desc="Reading variables") + while True: + start_pos = fin.tell() + try: + n_dims, 
name_length, ftype_cur = struct.unpack("iii", fin.read(4 * 3)) + except struct.error: + break + + shape = tuple(struct.unpack("i" * n_dims, fin.read(4 * n_dims))) + shape = shape[::-1] + name = fin.read(name_length).decode("utf-8") + + if ftype_cur == 2: + # 4-bit quantized weights + dtype = np.uint8 + data = dequantize_weights(fin, shape[0], shape[1]) + data = data.reshape(shape) + elif ftype_cur == 0: + dtype = np.float32 + data_size = np.prod(shape) + data = np.fromfile(fin, dtype=dtype, count=data_size).reshape(shape) + elif ftype_cur == 1: + dtype = np.float16 + data_size = np.prod(shape) + data = np.fromfile(fin, dtype=dtype, count=data_size).reshape(shape) + + model[name] = torch.tensor(data, dtype=torch.float32 if dtype == np.float32 else torch.float16) + + pbar.update(fin.tell() - start_pos) + + return model + + +def convert_to_hf_format(model, hparams): + # This works for llama 7B, need to test with other models + n_layers = hparams["n_layers"] + n_heads = hparams["n_heads"] + dim = hparams["dim"] + dims_per_head = dim // n_heads + base = 10000.0 + inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) + + # permute for sliced rotary + def permute(w): + return w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim) + + state_dict = {} + for layer_i in range(n_layers): + state_dict.update( + { + f"model.layers.{layer_i}.self_attn.q_proj.weight": permute( + model[f"layers.{layer_i}.attention.wq.weight"] + ), + f"model.layers.{layer_i}.self_attn.k_proj.weight": permute( + model[f"layers.{layer_i}.attention.wk.weight"] + ), + f"model.layers.{layer_i}.self_attn.v_proj.weight": model[ + f"layers.{layer_i}.attention.wv.weight" + ], + f"model.layers.{layer_i}.self_attn.o_proj.weight": model[ + f"layers.{layer_i}.attention.wo.weight" + ], + f"model.layers.{layer_i}.mlp.gate_proj.weight": model[ + f"layers.{layer_i}.feed_forward.w1.weight" + ], + f"model.layers.{layer_i}.mlp.down_proj.weight": model[ + f"layers.{layer_i}.feed_forward.w2.weight" + ], + f"model.layers.{layer_i}.mlp.up_proj.weight": model[ + f"layers.{layer_i}.feed_forward.w3.weight" + ], + f"model.layers.{layer_i}.input_layernorm.weight": model[ + f"layers.{layer_i}.attention_norm.weight" + ], + f"model.layers.{layer_i}.post_attention_layernorm.weight": model[ + f"layers.{layer_i}.ffn_norm.weight" + ], + } + ) + state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq + state_dict.update( + { + "model.embed_tokens.weight": model["tok_embeddings.weight"], + "model.norm.weight": model["norm.weight"], + "lm_head.weight": model["output.weight"], + } + ) + + return state_dict + + +def chat(model, hparams, llama_dir): + from transformers import (GenerationConfig, LlamaForCausalLM, + LlamaTokenizer, StoppingCriteria, + StoppingCriteriaList) + from transformers.models.llama.configuration_llama import LlamaConfig + + class StoppingCriteriaSub(StoppingCriteria): + def __init__(self): + super().__init__() + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, stops=[]): + print(tokenizer.decode(input_ids[0]), end="", flush=True) + if input_ids[0][-1] == 13: + return True + + return False + + config = LlamaConfig( + vocab_size=hparams["vocab_size"], + dim=hparams["dim"], + num_hidden_layers=hparams["n_layers"], + num_attention_heads=hparams["n_heads"], + ) + + llama = LlamaForCausalLM(config=config) + llama.load_state_dict(state_dict=model, strict=True) + tokenizer = LlamaTokenizer.from_pretrained(llama_dir) + + device = 
torch.device("cpu") + llama = llama.to(device) + + ctx = """You are AI. +This is a dialog, where User interacts with AI. AI is helpful, kind, obedient, honest, respectful, direct, concise, should try to protect User's privacy, and knows its own limits. Also, AI must answer User and AI cannot stop the conversation by itself. +User: Hello, AI. +AI: Hello! How can I assist you today? +""" + print(ctx.rstrip("\n")) + while True: + print("-" * 60) + prompt = input(f"User: ") + if ctx != "": + ctx = ctx + "User: " + prompt + "\n" + else: + ctx = prompt + "\nAI:" + + ctx = (ctx[-1920:]) if len(ctx) >= 2048 else ctx + + print("-" * 60) + if len(ctx.strip()) > 0: + input_ids = tokenizer(ctx, return_tensors="pt")["input_ids"].to(device) + generation_config = GenerationConfig( + temperature=0.8, + top_p=0.95, + top_k=50, + repetition_penalty=1.1764, + ) + with torch.no_grad(): + generation_output = llama.generate( + input_ids=input_ids, + generation_config=generation_config, + return_dict_in_generate=True, + output_scores=True, + max_length=2048, + do_sample=True, + stopping_criteria=StoppingCriteriaList([StoppingCriteriaSub()]), + ) + s = generation_output.sequences[0] + decoded = tokenizer.decode(s) + ctx = decoded + "\n" + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_dir", "-i", type=str, required=True, help="The input directory containing the ggml files." + ) + parser.add_argument( + "--prefix", + "-p", + type=str, + required=True, + help="The prefix of the ggml files (ggml-model-f16 or ggml-model-q4_0).", + ) + parser.add_argument( + "--hf", + action="store_true", + help="Whether to save the model in the huggingface format. (default: False)", + ) + parser.add_argument( + "--chat", "-c", action="store_true", help="Whether to open a chat with the model. 
(default: False)" + ) + args = parser.parse_args() + + llama_dir = os.path.abspath(f"{args.input_dir}/../") + + ggml_files = sorted( + [f"{args.input_dir}/{f}" for f in os.listdir(args.input_dir) if f.startswith(args.prefix)] + ) + + fin = open(ggml_files[0], "rb") + hparams, ftype = read_header(fin) + tokens = read_tokens(fin, hparams["vocab_size"]) + model = read_variables(fin) + + for f in tqdm(ggml_files[1:]): + fin = open(f, "rb") + read_header(fin) + read_tokens(fin, hparams["vocab_size"]) + model.update(read_variables(fin)) + + if args.hf: + model = convert_to_hf_format(model, hparams) + + pth_ckpt = { + "state_dict": model, + "hparams": hparams, + "tokens": tokens, + } + + torch.save(pth_ckpt, f"{args.input_dir}/{args.prefix}-to-torch.pth") + + if args.chat: + if not args.hf: + model = convert_to_hf_format(model, hparams) + chat(model, hparams, llama_dir) + + +if __name__ == "__main__": + main() From d0aaff571cd5c316b68e3e11d57e274bfd2bd457 Mon Sep 17 00:00:00 2001 From: thement <40525767+thement@users.noreply.github.com> Date: Tue, 28 Mar 2023 19:55:42 +0200 Subject: [PATCH 26/76] py : add temporary script to convert old ggml files to newer version (#539) Co-authored-by: Jakub Horak --- convert-unversioned-ggml-to-ggml.py | 100 ++++++++++++++++++++++++++++ llama.cpp | 2 +- 2 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 convert-unversioned-ggml-to-ggml.py diff --git a/convert-unversioned-ggml-to-ggml.py b/convert-unversioned-ggml-to-ggml.py new file mode 100644 index 000000000..2457e3181 --- /dev/null +++ b/convert-unversioned-ggml-to-ggml.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +# Original by https://github.com/eiz +# https://github.com/ggerganov/llama.cpp/issues/324#issuecomment-1476227818 +import argparse +import glob +import os +import struct +import sys +from sentencepiece import SentencePieceProcessor + +HPARAMS = keys = ["vocab_size", "dim", "multiple_of", "n_heads", "n_layers"] + +def parse_args(): + parser = argparse.ArgumentParser(description='Upgrade old ggml model files to the current format') + parser.add_argument('dir_model', help='directory containing ggml .bin files') + parser.add_argument('tokenizer_model', help='path to LLaMA tokenizer.model file') + return parser.parse_args() + +def read_header(f_in): + struct_fmt = "i" * (3 + len(HPARAMS)) + struct_size = struct.calcsize(struct_fmt) + buf = f_in.read(struct_size) + return struct.unpack(struct_fmt, buf) + +def write_header(f_out, header): + (magic, vocab_size, dim, multiple_of, n_heads, n_layers, rot, ftype) = header + + if magic != 0x67676d6c: + raise Exception('Invalid file magic. 
Must be an old style ggml file.') + + values = [ + 0x67676d66, # magic: ggml in hex + 1, # file version + vocab_size, + dim, + multiple_of, + n_heads, + n_layers, + rot, + ftype + ] + f_out.write(struct.pack("i" * len(values), *values)) + +def write_tokens(fout, tokenizer): + for i in range(tokenizer.vocab_size()): + if tokenizer.is_unknown(i): + text = " \u2047 ".encode("utf-8") + elif tokenizer.is_control(i): + text = b"" + elif tokenizer.is_byte(i): + piece = tokenizer.id_to_piece(i) + if len(piece) != 6: + print(f"Invalid token: {piece}") + sys.exit(1) + byte_value = int(piece[3:-1], 16) + text = struct.pack("B", byte_value) + else: + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + fout.write(struct.pack("i", len(text))) + fout.write(text) + fout.write(struct.pack("f", tokenizer.get_score(i))) + +def read_tokens(f_in, tokenizer): + for i in range(tokenizer.vocab_size()): + len_b = f_in.read(4) + (length,) = struct.unpack("i", len_b) + f_in.read(length) + +def copy_all_data(f_out, f_in): + while True: + buf = f_in.read(1024 * 1024) + if not buf: + break + f_out.write(buf) + +def convert_one_file(path_in, tokenizer): + path_tmp = f"{path_in}.tmp" + path_orig= f"{path_in}.orig" + print(f"converting {path_in}") + with open(path_in, "rb") as f_in, open(path_tmp, "wb") as f_out: + write_header(f_out, read_header(f_in)) + read_tokens(f_in, tokenizer) + write_tokens(f_out, tokenizer) + copy_all_data(f_out, f_in) + os.rename(path_in, path_orig) + os.rename(path_tmp, path_in) + +def main(): + args = parse_args() + files = [] + files.extend(glob.glob(f"{args.dir_model}/*.bin")) + files.extend(glob.glob(f"{args.dir_model}/*.bin.*")) + + tokenizer = SentencePieceProcessor(args.tokenizer_model) + + for file in files: + convert_one_file(file, tokenizer) + +if __name__ == "__main__": + main() diff --git a/llama.cpp b/llama.cpp index ee7eb8ea7..2d0279258 100644 --- a/llama.cpp +++ b/llama.cpp @@ -320,7 +320,7 @@ static bool llama_model_load( uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic == LLAMA_FILE_MAGIC_UNVERSIONED) { - fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n", + fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files or convert them with convert-unversioned-ggml-to-ggml.py!)\n", __func__, fname.c_str()); return false; } From 2a98bc18ea34dbf15f261a0df37080e588a189d1 Mon Sep 17 00:00:00 2001 From: slaren <2141330+slaren@users.noreply.github.com> Date: Tue, 28 Mar 2023 20:06:03 +0200 Subject: [PATCH 27/76] ggml : add AVX2 implementation of quantize_row_q4_1 (#515) * Add AVX2 implementation of quantize_row_q4_1 * Actually use AVX2 * Make quantize_row_q4_1 static Co-authored-by: Georgi Gerganov --------- Co-authored-by: Georgi Gerganov --- ggml.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 89 insertions(+), 2 deletions(-) diff --git a/ggml.c b/ggml.c index ea7277895..222d199be 100644 --- a/ggml.c +++ b/ggml.c @@ -688,7 +688,7 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int #endif } -static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) { +static void quantize_row_q4_1_reference(const float * restrict x, void * restrict vy, int k) { assert(k % QK == 0); const int nb = k / QK; @@ -729,6 +729,93 @@ static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int } } +static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) { + assert(k % QK == 0); + 
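+    // AVX2 path below: for each block of 32 floats, find the block min and max,
+    // derive the scale d = (max - min)/15 and offset m = min, map every value to
+    // a 4-bit level via (x - m)/d rounded to nearest, and pack two levels per
+    // byte into the 16-byte qs array; otherwise fall back to the scalar
+    // quantize_row_q4_1_reference implementation.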
+#if defined(__AVX2__) + const int nb = k / QK; + + block_q4_1 * restrict y = vy; + + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max for the block + __m256 vmax; + vmax = _mm256_max_ps( v0, v1 ); + vmax = _mm256_max_ps( vmax, v2 ); + vmax = _mm256_max_ps( vmax, v3 ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( vmax, 1 ), _mm256_castps256_ps128( vmax ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + // Compute min for the block + __m256 vmin; + vmin = _mm256_min_ps( v0, v1 ); + vmin = _mm256_min_ps( vmin, v2 ); + vmin = _mm256_min_ps( vmin, v3 ); + + __m128 min4 = _mm_min_ps( _mm256_extractf128_ps( vmin, 1 ), _mm256_castps256_ps128( vmin ) ); + min4 = _mm_min_ps( min4, _mm_movehl_ps( min4, min4 ) ); + min4 = _mm_min_ss( min4, _mm_movehdup_ps( min4 ) ); + const float minScalar = _mm_cvtss_f32( min4 ); + + // Quantize these floats + const float d = (maxScalar - minScalar) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].m = minScalar; + y[i].d = d; + + // x = (x-min)*id + const __m256 mul = _mm256_set1_ps( id ); + const __m256 off = _mm256_set1_ps( minScalar ); + v0 = _mm256_mul_ps( _mm256_sub_ps( v0, off ), mul ); + v1 = _mm256_mul_ps( _mm256_sub_ps( v1, off ), mul ); + v2 = _mm256_mul_ps( _mm256_sub_ps( v2, off ), mul ); + v3 = _mm256_mul_ps( _mm256_sub_ps( v3, off ), mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction is fixing the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + // Compress the vector into 4 bit/value, and store + __m128i res = packNibbles( i0 ); + _mm_storeu_si128( ( __m128i* )y[i].qs, res ); + } +#else + // scalar + quantize_row_q4_1_reference(x, vy, k); +#endif +} + static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) { assert(k % QK == 0); const int nb = k / QK; @@ -10135,7 +10222,7 @@ size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * for (int j = 0; j < n; j += k) { block_q4_1 * restrict y = (block_q4_1 *)dst + j/QK; - quantize_row_q4_1(src + j, y, k); + quantize_row_q4_1_reference(src + j, y, k); for (int i = 0; i < nb; i++) { for (int l = 0; l < QK; l += 2) { From 7f4c5c66514227c3870c2bd189fb0609fdd0de10 Mon Sep 17 00:00:00 2001 From: 
anzz1 Date: Tue, 28 Mar 2023 21:23:09 +0300 Subject: [PATCH 28/76] llama : fix linkage with mingw (#551) * Revert 7e53955 (#542) Still needs to be fixed properly * Fix linking on mingw32 --- examples/embedding/CMakeLists.txt | 2 +- examples/main/CMakeLists.txt | 2 +- examples/perplexity/CMakeLists.txt | 2 +- examples/quantize/CMakeLists.txt | 2 +- llama.h | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/embedding/CMakeLists.txt b/examples/embedding/CMakeLists.txt index def5b831b..88c425d4a 100644 --- a/examples/embedding/CMakeLists.txt +++ b/examples/embedding/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET embedding) add_executable(${TARGET} embedding.cpp) -target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/main/CMakeLists.txt b/examples/main/CMakeLists.txt index aa1f79406..b2dcc2910 100644 --- a/examples/main/CMakeLists.txt +++ b/examples/main/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET main) add_executable(${TARGET} main.cpp) -target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/perplexity/CMakeLists.txt b/examples/perplexity/CMakeLists.txt index 9bd8e376f..5836df8b2 100644 --- a/examples/perplexity/CMakeLists.txt +++ b/examples/perplexity/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET perplexity) add_executable(${TARGET} perplexity.cpp) -target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/quantize/CMakeLists.txt b/examples/quantize/CMakeLists.txt index 17a995bbd..fb27d4517 100644 --- a/examples/quantize/CMakeLists.txt +++ b/examples/quantize/CMakeLists.txt @@ -1,4 +1,4 @@ set(TARGET quantize) add_executable(${TARGET} quantize.cpp) -target_link_libraries(${TARGET} PRIVATE llama ggml ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/llama.h b/llama.h index f5a576c1e..587d85323 100644 --- a/llama.h +++ b/llama.h @@ -6,7 +6,7 @@ #include #ifdef LLAMA_SHARED -# ifdef _WIN32 +# ifdef _WIN32 && !defined __MINGW32__ # ifdef LLAMA_BUILD # define LLAMA_API __declspec(dllexport) # else From f1217055eaedfc7214be93d98e529cae89830430 Mon Sep 17 00:00:00 2001 From: anzz1 Date: Tue, 28 Mar 2023 22:43:25 +0300 Subject: [PATCH 29/76] CI: fix subdirectory path globbing (#546) - Changes in subdirectories will now be detecter properly - (Windows-MSVC) AVX512 tests temporarily disabled --- .github/workflows/build.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cb35a3298..b5cf71a5e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,10 +8,10 @@ on: required: true type: boolean push: - paths: ['.github/workflows/**', 'CMakeLists.txt', 'Makefile', '**.h', '*.c', '**.cpp'] + paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.c', '**/*.cpp'] pull_request: types: [opened, synchronize, edited, reopened, review_requested, ready_for_review] - paths: ['CMakeLists.txt', 
'Makefile', '**.h', '*.c', '**.cpp'] + paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.c', '**/*.cpp'] env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} @@ -176,9 +176,7 @@ jobs: if: ${{ matrix.build == 'avx512' }} continue-on-error: true run: | - cd build - Set-Content -Path .\avx512f.exe -Value ([Convert]::FromBase64String('TVqQAAMAAAAEAAAA//8AALgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyAAAAA4fug4AtAnNIbgBTM0hVGhpcyBwcm9ncmFtIGNhbm5vdCBiZSBydW4gaW4gRE9TIG1vZGUuDQ0KJAAAAAAAAAClmfXY4fibi+H4m4vh+JuL4fiai+P4m4si98aL4vibi7Xbq4vg+JuLUmljaOH4m4sAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQRQAATAEBAGo6H2QAAAAAAAAAAOAADwELAQYAAAIAAAAAAAAAAAAADBAAAAAQAAAAIAAAAABAAAAQAAAAAgAABAAAAAAAAAAEAAAAAAAAAAAgAAAAAgAAAAAAAAMAAAAAABAAABAAAAAAEAAAEAAAAAAAABAAAAAAAAAAAAAAAFQQAAAoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC50ZXh0AAAAsgAAAAAQAAAAAgAAAAIAAAAAAAAAAAAAAAAAACAAAGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUEAAAiBAAAAAAAABVi+xRUVNTuAcAAAAPosHrEGaD4wGJXfxbg0X8MI1F+GoAUI1F/GoBUGr1/xUAEEAAUP8VBBBAAItF/FuDwND32BvAQMnDzMx8EAAAAAAAAAAAAACkEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlBAAAIgQAAAAAAAApANXcml0ZUZpbGUAuQFHZXRTdGRIYW5kbGUAAEtFUk5FTDMyLmRsbAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==')) -AsByteStream - .\avx512f.exe && echo " AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo " AVX512F: NO" + echo "TODO: check avx512f" - name: Test id: cmake_test From 5a5f8b1501fbb34367225544010ddfc306d6d2fe Mon Sep 17 00:00:00 2001 From: anzz1 Date: Tue, 28 Mar 2023 22:44:29 +0300 Subject: [PATCH 30/76] Enable Fused-Multiply-Add (FMA) and F16C/CVT16 vector extensions on MSVC (#375) * Enable Fused-Multiply-Add (FMA) instructions on MSVC __FMA__ macro does not exist in MSVC * Enable F16C/CVT16 vector extensions on MSVC __F16C__ macro does not exist in MSVC, but is implied with AVX2/AVX512 * MSVC cvt intrinsics * Add __SSE3__ macro for MSVC too because why not even though it's not currently used for anything when AVX is defined --- ggml.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/ggml.c b/ggml.c index 222d199be..efe9316bb 100644 --- a/ggml.c +++ b/ggml.c @@ -79,6 +79,19 @@ static int sched_yield (void) { typedef void* thread_ret_t; #endif +// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512 +#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__)) +#ifndef __FMA__ +#define __FMA__ +#endif +#ifndef __F16C__ +#define __F16C__ +#endif +#ifndef __SSE3__ +#define __SSE3__ +#endif +#endif + #ifdef __HAIKU__ #define static_assert(cond, msg) _Static_assert(cond, msg) #endif @@ -172,8 +185,13 @@ typedef double ggml_float; #ifdef __F16C__ +#ifdef _MSC_VER +#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) +#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) +#else #define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) #define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) +#endif #elif defined(__POWER9_VECTOR__) From 
a5c42c4b13b3be9e58fe8f9adbb6ee60417674a6 Mon Sep 17 00:00:00 2001 From: anzz1 Date: Wed, 29 Mar 2023 16:19:29 +0300 Subject: [PATCH 31/76] Fix typo in llama.h (#593) --- llama.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.h b/llama.h index 587d85323..3368de3e0 100644 --- a/llama.h +++ b/llama.h @@ -6,7 +6,7 @@ #include #ifdef LLAMA_SHARED -# ifdef _WIN32 && !defined __MINGW32__ +# if defined(_WIN32) && !defined(__MINGW32__) # ifdef LLAMA_BUILD # define LLAMA_API __declspec(dllexport) # else From 83df5639eb182ed7c122382907691d8baa3c32df Mon Sep 17 00:00:00 2001 From: anzz1 Date: Wed, 29 Mar 2023 16:20:07 +0300 Subject: [PATCH 32/76] Fix GCC warning about binary literal (#595) 0b10101010 -> 0xAA /* 0b10101010 */ --- ggml.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.c b/ggml.c index efe9316bb..c049f00a9 100644 --- a/ggml.c +++ b/ggml.c @@ -1962,7 +1962,7 @@ static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * rest // Compute cross scales for the block const __m256 scale_0 = _mm256_mul_ps( d0v, m1v ); const __m256 scale_1 = _mm256_mul_ps( m0v, d1v ); - const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0b10101010 ); + const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0xAA /* 0b10101010 */ ); // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes __m256i bx = bytesFromNibbles( x[i].qs ); From a6956b25a1c783e5e96fe06c9c00438f846ef047 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tobias=20L=C3=BCtke?= Date: Wed, 29 Mar 2023 17:10:24 +0200 Subject: [PATCH 33/76] add example of re-act pattern (#583) * add example of re-act pattern * spelling... * fixed whitespace in reverse prompt issue --- examples/reason-act.sh | 17 +++++++++++++++++ prompts/reason-act.txt | 18 ++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100755 examples/reason-act.sh create mode 100644 prompts/reason-act.txt diff --git a/examples/reason-act.sh b/examples/reason-act.sh new file mode 100755 index 000000000..e7fe655db --- /dev/null +++ b/examples/reason-act.sh @@ -0,0 +1,17 @@ + +#!/bin/bash + +cd `dirname $0` +cd .. + +# get -m model parameter otherwise defer to default +if [ "$1" == "-m" ]; then + MODEL="-m $2 " +fi + +./main $MODEL --color \ + -f ./prompts/reason-act.txt \ + -i --interactive-first \ + --top_k 10000 --temp 0.2 --repeat_penalty 1 -t 7 -c 2048 \ + -r "Question:" -r "Observation:" --in-prefix " " \ + -n -1 diff --git a/prompts/reason-act.txt b/prompts/reason-act.txt new file mode 100644 index 000000000..872016631 --- /dev/null +++ b/prompts/reason-act.txt @@ -0,0 +1,18 @@ +You run in a loop of Thought, Action, Observation. +At the end of the loop either Answer or restate your Thought and Action. +Use Thought to describe your thoughts about the question you have been asked. +Use Action to run one of these actions available to you: +- calculate[python math expression] +Observation will be the result of running those actions + + +Question: What is 4 * 7 / 3? +Thought: Do I need to use an action? Yes, I use calculate to do math +Action: calculate[4 * 7 / 3] +Observation: 9.3333333333 +Thought: Do I need to use an action? No, have the result +Answer: The calculate tool says it is 9.3333333333 +Question: What is capital of france? +Thought: Do I need to use an action? 
No, I know the answer +Answer: Paris is the capital of France +Question: From 41318d708ed196ff727dce14d263a64b23c7333d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ABl=20Kerbiriou?= Date: Wed, 29 Mar 2023 18:10:07 +0200 Subject: [PATCH 34/76] llama : use the same threshold for OpenBLAS and ggml thread limiting (#577) --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 2d0279258..aa0c362d9 100644 --- a/llama.cpp +++ b/llama.cpp @@ -856,7 +856,7 @@ static bool llama_eval_internal( // for big prompts, if BLAS is enabled, it is better to use only one thread // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance ggml_cgraph gf = {}; - gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads; + gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads; struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); memcpy(embd->data, tokens, N*ggml_element_size(embd)); From 53635c081c49321d523567112f9fddfbba6b787b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 19:29:26 +0300 Subject: [PATCH 35/76] py : add GPT4All conversion script For now: copy-paste Too much time for me to deduplicate the python code --- convert-gpt4all-to-ggml.py | 107 ++++++++++++++++++++++++++++ convert-unversioned-ggml-to-ggml.py | 2 +- 2 files changed, 108 insertions(+), 1 deletion(-) create mode 100644 convert-gpt4all-to-ggml.py diff --git a/convert-gpt4all-to-ggml.py b/convert-gpt4all-to-ggml.py new file mode 100644 index 000000000..f1d9d7aef --- /dev/null +++ b/convert-gpt4all-to-ggml.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 + +# +# TODO: deduplicate GPT4All with convert-unversioned-ggml-to-ggml.py +# + +# Original by https://github.com/eiz +# https://github.com/ggerganov/llama.cpp/issues/324#issuecomment-1476227818 +import argparse +import glob +import os +import struct +import sys +from sentencepiece import SentencePieceProcessor + +HPARAMS = keys = ["vocab_size", "dim", "multiple_of", "n_heads", "n_layers"] + +def parse_args(): + parser = argparse.ArgumentParser(description='Upgrade a GPT4All model to the current format') + parser.add_argument('gpt4all_model', help='path to gpt4all-lora-quantized.bin') + parser.add_argument('tokenizer_model', help='path to LLaMA tokenizer.model file') + return parser.parse_args() + +def read_header(f_in): + struct_fmt = "i" * (3 + len(HPARAMS)) + struct_size = struct.calcsize(struct_fmt) + buf = f_in.read(struct_size) + return struct.unpack(struct_fmt, buf) + +def write_header(f_out, header): + (magic, vocab_size, dim, multiple_of, n_heads, n_layers, rot, ftype) = header + + if magic != 0x67676d6c: + raise Exception('Invalid file magic. 
Must be an old style ggml file.') + + values = [ + 0x67676d66, # magic: ggml in hex + 1, # file version + vocab_size, + dim, + multiple_of, + n_heads, + n_layers, + rot, + ftype + ] + f_out.write(struct.pack("i" * len(values), *values)) + +def write_tokens(fout, tokenizer): + for i in range(tokenizer.vocab_size()): + if tokenizer.is_unknown(i): + text = " \u2047 ".encode("utf-8") + elif tokenizer.is_control(i): + text = b"" + elif tokenizer.is_byte(i): + piece = tokenizer.id_to_piece(i) + if len(piece) != 6: + print(f"Invalid token: {piece}") + sys.exit(1) + byte_value = int(piece[3:-1], 16) + text = struct.pack("B", byte_value) + else: + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + fout.write(struct.pack("i", len(text))) + fout.write(text) + fout.write(struct.pack("f", tokenizer.get_score(i))) + + # TODO: GPT4All - add extra token + text = "".encode("utf-8") + fout.write(struct.pack("i", len(text))) + fout.write(text) + fout.write(struct.pack("f", 0.0)) + +def read_tokens(f_in, tokenizer): + for i in range(tokenizer.vocab_size()): + len_b = f_in.read(4) + (length,) = struct.unpack("i", len_b) + f_in.read(length) + +def copy_all_data(f_out, f_in): + while True: + buf = f_in.read(1024 * 1024) + if not buf: + break + f_out.write(buf) + +def convert_one_file(path_in, tokenizer): + path_tmp = f"{path_in}.tmp" + path_orig= f"{path_in}.orig" + print(f"converting {path_in}") + with open(path_in, "rb") as f_in, open(path_tmp, "wb") as f_out: + write_header(f_out, read_header(f_in)) + read_tokens(f_in, tokenizer) + write_tokens(f_out, tokenizer) + copy_all_data(f_out, f_in) + os.rename(path_in, path_orig) + os.rename(path_tmp, path_in) + +def main(): + args = parse_args() + + tokenizer = SentencePieceProcessor(args.tokenizer_model) + + convert_one_file(args.gpt4all_model, tokenizer) + +if __name__ == "__main__": + main() diff --git a/convert-unversioned-ggml-to-ggml.py b/convert-unversioned-ggml-to-ggml.py index 2457e3181..33b6243bd 100644 --- a/convert-unversioned-ggml-to-ggml.py +++ b/convert-unversioned-ggml-to-ggml.py @@ -27,7 +27,7 @@ def write_header(f_out, header): if magic != 0x67676d6c: raise Exception('Invalid file magic. 
Must be an old style ggml file.') - + values = [ 0x67676d66, # magic: ggml in hex 1, # file version From 516d88e75c9e768c0001a452dbad212494c586b3 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 19:37:20 +0300 Subject: [PATCH 36/76] readme : add GPT4All instructions (close #588) --- README.md | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5675a927b..c2323f40a 100644 --- a/README.md +++ b/README.md @@ -10,9 +10,7 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ **Hot topics:** - [Roadmap (short-term)](https://github.com/ggerganov/llama.cpp/discussions/457) -- New C-style API is now available: https://github.com/ggerganov/llama.cpp/pull/370 -- Cache input prompts for faster initialization: https://github.com/ggerganov/llama.cpp/issues/64 -- Create a `llama.cpp` logo: https://github.com/ggerganov/llama.cpp/issues/105 +- Support for [GPT4All](https://github.com/ggerganov/llama.cpp#using-gpt4all) ## Description @@ -37,6 +35,12 @@ Supported platforms: - [X] Windows (via CMake) - [X] Docker +Supported models: + +- [X] LLaMA +- [X] [Alpaca](https://github.com/ggerganov/llama.cpp#instruction-mode-with-alpaca) +- [X] [GPT4All](https://github.com/ggerganov/llama.cpp#using-gpt4all) + --- Here is a typical run using LLaMA-7B: @@ -222,6 +226,17 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach. > ``` +### Using [GPT4All](https://github.com/nomic-ai/gpt4all) + +- Obtain the `gpt4all-lora-quantized.bin` model +- It is distributed in the old `ggml` format which is not obsoleted. So you have to convert it to the new format using [./convert-gpt4all-to-ggml.py](./convert-gpt4all-to-ggml.py): + + ```bash + python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model + ``` + +- You can now use the newly generated `gpt4all-lora-quantized.bin` model in exactly the same way as all other models. The original model is stored in the same folder with a suffix `.orig` + ### Obtaining and verifying the Facebook LLaMA original model and Stanford Alpaca model data - **Under no circumstances share IPFS, magnet links, or any other links to model downloads anywhere in this respository, including in issues, discussions or pull requests. They will be immediately deleted.** From b467702b87461543c75013207e9adc6d20dcc01d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 19:38:31 +0300 Subject: [PATCH 37/76] readme : fix typos --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c2323f40a..e30452ee0 100644 --- a/README.md +++ b/README.md @@ -229,13 +229,15 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach. ### Using [GPT4All](https://github.com/nomic-ai/gpt4all) - Obtain the `gpt4all-lora-quantized.bin` model -- It is distributed in the old `ggml` format which is not obsoleted. So you have to convert it to the new format using [./convert-gpt4all-to-ggml.py](./convert-gpt4all-to-ggml.py): +- It is distributed in the old `ggml` format which is now obsoleted +- You have to convert it to the new format using [./convert-gpt4all-to-ggml.py](./convert-gpt4all-to-ggml.py): ```bash python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model ``` -- You can now use the newly generated `gpt4all-lora-quantized.bin` model in exactly the same way as all other models. 
The original model is stored in the same folder with a suffix `.orig` +- You can now use the newly generated `gpt4all-lora-quantized.bin` model in exactly the same way as all other models +- The original model is saved in the same folder with a suffix `.orig` ### Obtaining and verifying the Facebook LLaMA original model and Stanford Alpaca model data From d9ad104440d84a0cc0734bff47ef0ba41ba740c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9rence?= <13496987+Royalphax@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:21:09 +0200 Subject: [PATCH 38/76] Create chat-13B.bat (#592) * Create chat-13B.bat Same script than chat-13B.sh, but for windows users. Tested and working on windows 10/11 v 22H2 * Apply suggestions from code review --------- Co-authored-by: anzz1 --- examples/chat-13B.bat | 57 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 examples/chat-13B.bat diff --git a/examples/chat-13B.bat b/examples/chat-13B.bat new file mode 100644 index 000000000..c5c8ac6ef --- /dev/null +++ b/examples/chat-13B.bat @@ -0,0 +1,57 @@ +@setlocal disabledelayedexpansion enableextensions +@echo off + +cd /d "%~dp0.." +if not "%errorlevel%"=="0" ( + echo Unable to change directory. + pause + exit /b 1 +) + +if not defined MODEL set "MODEL=models\13B\ggml-model-q4_0.bin" +if not defined USER_NAME set "USER_NAME=User" +if not defined AI_NAME set "AI_NAME=ChatLLaMa" +rem Adjust to the number of CPU cores you want to use. +rem if not defined N_THREAD set "N_THREAD=8" +rem Number of tokens to predict (made it larger than default because we want a long interaction) +if not defined N_PREDICTS set "N_PREDICTS=2048" +if not defined GEN_OPTIONS set "GEN_OPTIONS=--ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647" + +rem Default main script paths +set "DEFAULT_MAIN_SCRIPT_PATHS=main.exe build\bin\main.exe" + +rem Get main script path from command line arguments +set "MAIN_SCRIPT_PATH=%~1" + +rem If the main script path was not specified, try the default paths +if not defined MAIN_SCRIPT_PATH ( + for %%i in (%DEFAULT_MAIN_SCRIPT_PATHS%) do ( + if exist "%%i" set "MAIN_SCRIPT_PATH=%%i" + ) +) + +rem If the main script path was not found, tell the user how to specify it +if not defined MAIN_SCRIPT_PATH ( + echo The main script could not be found. Please provide the path to the main script as 1st argument to this script, or place the main script in one of the default locations: + echo %DEFAULT_MAIN_SCRIPT_PATHS% + pause + exit /b 1 +) + +rem Default context, feel free to edit it +set "PROMPT_TEXT=Text transcript of a never ending dialog, where %USER_NAME% interacts with an AI assistant named %AI_NAME%. %AI_NAME% is helpful, kind, honest, friendly, good at writing and never fails to answer %USER_NAME%'s requests immediately and with details and precision. There are no annotations like (30 seconds passed...) or (to himself), just what %USER_NAME% and %AI_NAME% say aloud to each other. The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. The transcript only includes text, it does not include markup like HTML and Markdown." 
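+rem %USER_NAME% and %AI_NAME% in the prompt above expand to the variables set at
+rem the top of this script (defaults: User and ChatLLaMa), so overriding those
+rem variables also renames the personas in the default context.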
+ +rem Set a temporary variable if N_THREAD is set +if defined N_THREAD ( + set "_N_THREAD=--threads %N_THREAD%" +) else ( + set "_N_THREAD=" +) + +rem Run the script +echo "%MAIN_SCRIPT_PATH%" %GEN_OPTIONS% %_N_THREAD% ^ + --model "%MODEL%" ^ + --n_predict %N_PREDICTS% ^ + --color --interactive ^ + --reverse-prompt "%USER_NAME%:" ^ + --prompt "%PROMPT_TEXT%" From 61cbfff5c95e45236883b1b60e025f8f6fa8c8a3 Mon Sep 17 00:00:00 2001 From: Pavol Rusnak Date: Wed, 29 Mar 2023 20:09:25 +0200 Subject: [PATCH 39/76] rename convert_ggml_to_pth.py -> convert-ggml-to-pth.py (#600) to match filenames of other converters --- convert_ggml_to_pth.py => convert-ggml-to-pth.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename convert_ggml_to_pth.py => convert-ggml-to-pth.py (100%) diff --git a/convert_ggml_to_pth.py b/convert-ggml-to-pth.py similarity index 100% rename from convert_ggml_to_pth.py rename to convert-ggml-to-pth.py From 3b44d30d9b618f0f2eb9abcfe912770a4e7d85d4 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 21:47:33 +0300 Subject: [PATCH 40/76] ggml : add ARM_NEON ggml_vec_dot_q4_1() --- ggml.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/ggml.c b/ggml.c index c049f00a9..0906cf90e 100644 --- a/ggml.c +++ b/ggml.c @@ -2008,6 +2008,45 @@ static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * rest res = _mm_add_ss( res, _mm_movehdup_ps( res ) ); sumf = _mm_cvtss_f32( res ) + acc_offset * QK; +#elif defined(__ARM_NEON) + float sum00 = 0.0f; + float sum01 = 0.0f; + float sum10 = 0.0f; + float sum11 = 0.0f; + + for (int i = 0; i < nb; ++i) { + const block_q4_1 * restrict x0 = &x[i + 0]; + const block_q4_1 * restrict y0 = &y[i + 0]; + + const uint8x16_t m4b = vdupq_n_u8(0xf); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v1_0 = vld1q_u8(y0->qs); + + // and with 0xf + const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); + const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); + + const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); + const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); + + // dot product into uint16x8_t + const uint16x8_t pl0l = vmull_u8(vget_low_u8 (v0_0l), vget_low_u8 (v1_0l)); + const uint16x8_t pl0h = vmull_u8(vget_high_u8(v0_0l), vget_high_u8(v1_0l)); + + const uint16x8_t ph0l = vmull_u8(vget_low_u8 (v0_0h), vget_low_u8 (v1_0h)); + const uint16x8_t ph0h = vmull_u8(vget_high_u8(v0_0h), vget_high_u8(v1_0h)); + + const uint16x8_t pl0 = vaddq_u16(pl0l, pl0h); + const uint16x8_t ph0 = vaddq_u16(ph0l, ph0h); + + sum00 += x0->m*y0->m; + sum01 += y0->m*x0->d*(vaddvq_u8(v0_0l) + vaddvq_u8(v0_0h)); + sum10 += x0->m*y0->d*(vaddvq_u8(v1_0l) + vaddvq_u8(v1_0h)); + sum11 += x0->d*y0->d*vaddvq_u16(vaddq_u16(pl0, ph0)); + } + + sumf = QK*sum00 + sum01 + sum10 + sum11; #else // scalar for (int i = 0; i < nb; i++) { From f202ada131f60059112a948f660b2e0ac93d049a Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 22:03:02 +0300 Subject: [PATCH 41/76] ggml : add ARM_NEON quantize_row_q4_1() --- ggml.c | 56 ++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/ggml.c b/ggml.c index 0906cf90e..51cd3b91c 100644 --- a/ggml.c +++ b/ggml.c @@ -564,10 +564,7 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int } } #elif __ARM_NEON - uint8_t pp[QK/2]; for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - float32x4_t srcv [8]; float32x4_t asrcv[8]; float32x4_t amaxv[8]; @@ -579,7 +576,8 @@ 
static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]); for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]); - amax = MAX( + // absolute max + const float amax = MAX( MAX(vgetq_lane_f32(amaxv[0], 0), vgetq_lane_f32(amaxv[0], 1)), MAX(vgetq_lane_f32(amaxv[0], 2), vgetq_lane_f32(amaxv[0], 3))); @@ -593,11 +591,9 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f)); const int32x4_t vi = vcvtq_s32_f32(vf); - pp[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); - pp[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); + y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); + y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); } - - memcpy(y[i].qs, pp, sizeof(pp)); } #elif defined(__AVX2__) for (int i = 0; i < nb; i++) { @@ -665,7 +661,6 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int _mm_storeu_si128( ( __m128i* )y[i].qs, res ); } #elif defined(__wasm_simd128__) - uint8_t pp[QK/2]; for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max @@ -694,11 +689,9 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f)); const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf); - pp[2*l + 0] = wasm_i32x4_extract_lane(vi, 0) | (wasm_i32x4_extract_lane(vi, 1) << 4); - pp[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4); + y[i].qs[2*l + 0] = wasm_i32x4_extract_lane(vi, 0) | (wasm_i32x4_extract_lane(vi, 1) << 4); + y[i].qs[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4); } - - memcpy(y[i].qs, pp, sizeof(pp)); } #else // scalar @@ -750,11 +743,11 @@ static void quantize_row_q4_1_reference(const float * restrict x, void * restric static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) { assert(k % QK == 0); -#if defined(__AVX2__) const int nb = k / QK; block_q4_1 * restrict y = vy; +#if defined(__AVX2__) for (int i = 0; i < nb; i++) { // Load elements into 4 AVX vectors __m256 v0 = _mm256_loadu_ps( x ); @@ -828,6 +821,41 @@ static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int __m128i res = packNibbles( i0 ); _mm_storeu_si128( ( __m128i* )y[i].qs, res ); } +#elif __ARM_NEON + for (int i = 0; i < nb; i++) { + float32x4_t srcv[8]; + float32x4_t minv[8]; + float32x4_t maxv[8]; + + for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l); + + for (int l = 0; l < 4; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]); + for (int l = 0; l < 2; l++) minv[4*l] = vminq_f32(minv[4*l], minv[4*l + 2]); + for (int l = 0; l < 1; l++) minv[8*l] = vminq_f32(minv[8*l], minv[8*l + 4]); + + for (int l = 0; l < 4; l++) maxv[2*l] = vmaxq_f32(srcv[2*l], srcv[2*l + 1]); + for (int l = 0; l < 2; l++) maxv[4*l] = vmaxq_f32(maxv[4*l], maxv[4*l + 2]); + for (int l = 0; l < 1; l++) maxv[8*l] = vmaxq_f32(maxv[8*l], maxv[8*l + 4]); + + const float min = vminvq_f32(minv[0]); + const float max = vmaxvq_f32(maxv[0]); + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = d; + y[i].m = min; + + const float32x4_t minv0 = vdupq_n_f32(min); + + for (int l = 0; l < 8; l++) { + const float32x4_t v = vmulq_n_f32(vsubq_f32(srcv[l], minv0), id); + const int32x4_t vi = vcvtq_s32_f32(v); + + y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); + y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); + } + } #else // scalar quantize_row_q4_1_reference(x, vy, k); From cea1c859483a5cfc7e2b31a06f8561d7a7604870 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 22:10:01 +0300 Subject: [PATCH 42/76] ggml : add ARM_NEON dequantize_row_q4_1() --- ggml.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/ggml.c b/ggml.c index 51cd3b91c..ccdba30e0 100644 --- a/ggml.c +++ b/ggml.c @@ -1016,6 +1016,50 @@ static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, in } } } +#elif defined(__ARM_NEON) + for (int i = 0; i < nb; i++) { + const float32x4_t vd = vdupq_n_f32(x[i].d); + const float32x4_t vm = vdupq_n_f32(x[i].m); + + const uint8_t * restrict pp = x[i].qs; + + for (int l = 0; l < QK; l += 16) { + // Load 16x4-bit integers into 8x8-bit integers + const uint8x8_t v8 = vld1_u8(pp + l/2); + + // Expand 4-bit qs to 8-bit bytes + const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f)); + const uint8x8_t v1 = vshr_n_u8(v8, 4); + + // Interleave and combine + const uint8x8_t vx_0 = vzip1_u8(v0, v1); + const uint8x8_t vx_1 = vzip2_u8(v0, v1); + + const uint8x16_t vq = vcombine_u8(vx_0, vx_1); + + // convert to 2x uint16x8_t + const uint16x8_t vi_0 = vmovl_s8(vget_low_u8 (vq)); + const uint16x8_t vi_1 = vmovl_s8(vget_high_u8(vq)); + + // convert to 4x float32x4_t + const float32x4_t vf_0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_0))); + const float32x4_t vf_1 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_0))); + const float32x4_t vf_2 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_1))); + const float32x4_t vf_3 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_1))); + + // multiply by d and add m + const float32x4_t r0 = vmlaq_f32(vm, vf_0, vd); + const float32x4_t r1 = vmlaq_f32(vm, vf_1, vd); + const float32x4_t r2 = vmlaq_f32(vm, vf_2, vd); + const float32x4_t r3 = vmlaq_f32(vm, vf_3, vd); + + // Store + vst1q_f32(y + i*QK + l + 0, r0); + vst1q_f32(y + i*QK + l + 4, r1); + vst1q_f32(y + i*QK + l + 8, r2); + vst1q_f32(y + i*QK + l + 12, r3); + } + } #else for (int i = 0; i < nb; i++) { const float d = x[i].d; From 0ba76c1e73ae21038b80bfb5a746157376c88173 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 22:13:12 +0300 Subject: [PATCH 43/76] llama : fix compile warnings when reading the vocab --- llama.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llama.cpp b/llama.cpp index aa0c362d9..e4998efa2 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1444,7 +1444,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s return false; } - std::string word; + std::vector word(32); vocab.id_to_token.resize(n_vocab); for (int i = 0; i < n_vocab; i++) { uint32_t len; @@ -1459,10 +1459,10 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s finp.read ((char *) &score, sizeof(score)); fout.write((char *) &score, sizeof(score)); - vocab.token_to_id[word] = i; + vocab.token_to_id[word.data()] = i; auto &tok_score = vocab.id_to_token[i]; - tok_score.tok = word; + tok_score.tok = word.data(); tok_score.score = score; } } From b51c717d5cf9181c33afcb84554e47f6d539c891 Mon 
Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Mar 2023 22:15:34 +0300 Subject: [PATCH 44/76] ggml : init time on first ggml_init() call --- ggml.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ggml.c b/ggml.c index ccdba30e0..02675ee67 100644 --- a/ggml.c +++ b/ggml.c @@ -2748,6 +2748,9 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { static bool is_first_call = true; if (is_first_call) { + // initialize time system (required on Windows) + ggml_time_init(); + // initialize GELU, SILU and EXP F32 tables { const uint64_t t_start = ggml_time_us(); UNUSED(t_start); From 9cbc404ba6699a9ba4925ea25a60552b13491c7a Mon Sep 17 00:00:00 2001 From: anzz1 Date: Wed, 29 Mar 2023 23:44:39 +0300 Subject: [PATCH 45/76] ci : re-enable AVX512 testing (Windows-MSVC) (#584) * CI: Re-enable AVX512 testing (Windows-MSVC) Now with 100% less base64 encoding * plain __cpuid is enough here --- .github/workflows/build.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b5cf71a5e..88e70e495 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -176,7 +176,13 @@ jobs: if: ${{ matrix.build == 'avx512' }} continue-on-error: true run: | - echo "TODO: check avx512f" + cd build + $vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath) + $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim())) + $cl = $(join-path $msvc 'bin\Hostx64\x64\cl.exe') + echo 'int main(void){unsigned int a[4];__cpuid(a,7);return !(a[1]&65536);}' >> avx512f.c + & $cl /O2 /GS- /kernel avx512f.c /link /nodefaultlib /entry:main + .\avx512f.exe && echo "AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo "AVX512F: NO" - name: Test id: cmake_test From ed3c680bcd0e8ce6e574573ba95880b694449878 Mon Sep 17 00:00:00 2001 From: slaren <2141330+slaren@users.noreply.github.com> Date: Thu, 30 Mar 2023 11:16:30 +0200 Subject: [PATCH 46/76] Fix GGML_F32Cx8_STORE in AVX without F16C path (#619) --- ggml.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.c b/ggml.c index 02675ee67..b7d79ab96 100644 --- a/ggml.c +++ b/ggml.c @@ -1297,7 +1297,7 @@ static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { _mm256_storeu_ps(arr, y); for (int i = 0; i < 8; i++) - x[i] = GGML_FP16_TO_FP32(arr[i]); + x[i] = GGML_FP32_TO_FP16(arr[i]); } #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) From 77efdf5a501b1140801da5cd8751e9f9b259ec32 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 30 Mar 2023 20:27:32 +0300 Subject: [PATCH 47/76] ggml : fix NEON signs (close #620, #622) --- ggml.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml.c b/ggml.c index b7d79ab96..2bcab68bf 100644 --- a/ggml.c +++ b/ggml.c @@ -1038,8 +1038,8 @@ static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, in const uint8x16_t vq = vcombine_u8(vx_0, vx_1); // convert to 2x uint16x8_t - const uint16x8_t vi_0 = vmovl_s8(vget_low_u8 (vq)); - const uint16x8_t vi_1 = vmovl_s8(vget_high_u8(vq)); + const uint16x8_t vi_0 = vmovl_u8(vget_low_u8 (vq)); + const uint16x8_t vi_1 = vmovl_u8(vget_high_u8(vq)); // convert to 4x float32x4_t const float32x4_t vf_0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_0))); From 1f0414feecc336482163af6c1e5650f9373ed8c9 Mon Sep 17 
00:00:00 2001 From: david raistrick Date: Thu, 30 Mar 2023 13:34:45 -0400 Subject: [PATCH 48/76] make : fix darwin f16c flags check (#615) ...there was no check. ported upstream from https://github.com/zanussbaum/gpt4all.cpp/pull/2 (I dont see any clean path for upstream patches) --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9cfa89f7a..83a4514ef 100644 --- a/Makefile +++ b/Makefile @@ -71,7 +71,10 @@ endif # feel free to update the Makefile for your architecture and send a pull request or issue ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686)) ifeq ($(UNAME_S),Darwin) - CFLAGS += -mf16c + F16C_M := $(shell sysctl machdep.cpu.features) + ifneq (,$(findstring F16C,$(F16C_M))) + CFLAGS += -mf16c + endif AVX1_M := $(shell sysctl machdep.cpu.features) ifneq (,$(findstring FMA,$(AVX1_M))) CFLAGS += -mfma From a4755cf288deb83df646f91f8fc98613271322db Mon Sep 17 00:00:00 2001 From: Casey Primozic Date: Thu, 30 Mar 2023 10:53:35 -0700 Subject: [PATCH 49/76] Remove unused variable (#607) * It seems some new warning were added recently that exposed this. I wrote the code that included this unused variable originally and it is indeed not needed. --- ggml.c | 1 - 1 file changed, 1 deletion(-) diff --git a/ggml.c b/ggml.c index 2bcab68bf..6a36bc952 100644 --- a/ggml.c +++ b/ggml.c @@ -1829,7 +1829,6 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest const int superblock_size = 8; const int superblock_count = nb / superblock_size; - const int remainder = nb % superblock_size; for (int superblock_ix = 0; superblock_ix < superblock_count; superblock_ix += 1) { int i = superblock_ix * superblock_size; From 3bcc129ba881c99795e850b0a23707a4dfdabe9d Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Thu, 30 Mar 2023 17:56:59 +0000 Subject: [PATCH 50/76] cmake : properly invoke CTest (#629) --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d7b0eba29..37f22700b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -253,7 +253,7 @@ endif() # if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION) - enable_testing() + include(CTest) add_subdirectory(tests) endif () From c03ae8dca1d7c451054754979e60a6de1f64c3cd Mon Sep 17 00:00:00 2001 From: Slaren <2141330+slaren@users.noreply.github.com> Date: Wed, 29 Mar 2023 02:03:43 +0200 Subject: [PATCH 51/76] Add mmap support for model files --- ggml.c | 9 ++++-- ggml.h | 1 + llama.cpp | 86 +++++++++++++++++++++++++++++++++++++++++++------------ 3 files changed, 74 insertions(+), 22 deletions(-) diff --git a/ggml.c b/ggml.c index 6a36bc952..4ea715957 100644 --- a/ggml.c +++ b/ggml.c @@ -2529,8 +2529,9 @@ struct ggml_context { void * mem_buffer; bool mem_buffer_owned; bool mem_buffer_mlocked; + bool no_alloc; - int n_objects; + int n_objects; struct ggml_object * objects_begin; struct ggml_object * objects_end; @@ -2815,6 +2816,7 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size), /*.mem_buffer_owned =*/ params.mem_buffer ? 
false : true, /*.mem_buffer_mlocked =*/ false, + /*.no_alloc =*/ params.no_alloc, /*.n_objects =*/ 0, /*.objects_begin =*/ NULL, /*.objects_end =*/ NULL, @@ -2930,7 +2932,7 @@ struct ggml_tensor * ggml_new_tensor_impl( size_t size_needed = 0; - if (data == NULL) { + if (data == NULL && !ctx->no_alloc) { size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]); for (int i = 1; i < n_dims; i++) { size_needed *= ne[i]; @@ -3014,7 +3016,7 @@ struct ggml_tensor * ggml_new_tensor_impl( /*.perf_runs =*/ 0, /*.perf_cycles =*/ 0, /*.perf_time_us =*/ 0, - /*.data =*/ data == NULL ? (void *)(result + 1) : data, + /*.data =*/ (data == NULL && !ctx->no_alloc) ? (void *)(result + 1) : data, /*.pad =*/ { 0 }, }; @@ -10277,6 +10279,7 @@ enum ggml_opt_result ggml_opt( struct ggml_init_params params_ctx = { .mem_size = 16*1024*1024, .mem_buffer = NULL, + .no_alloc = false, }; ctx = ggml_init(params_ctx); diff --git a/ggml.h b/ggml.h index 335230f9f..058dfe230 100644 --- a/ggml.h +++ b/ggml.h @@ -316,6 +316,7 @@ struct ggml_init_params { // memory pool size_t mem_size; // bytes void * mem_buffer; // if NULL, memory will be allocated internally + bool no_alloc; // don't allocate memory for the tensor data }; void ggml_time_init(void); // call this once at the beginning of the program diff --git a/llama.cpp b/llama.cpp index e4998efa2..d7126f459 100644 --- a/llama.cpp +++ b/llama.cpp @@ -12,6 +12,13 @@ #include #include +// headers for POSIX mmap +#if defined (__unix__) || defined (__APPLE__) +# include +# include +# include +#endif + #define LLAMA_USE_SCRATCH #define LLAMA_MAX_SCRATCH_BUFFERS 16 @@ -246,6 +253,7 @@ static bool kv_cache_init( struct ggml_init_params params; params.mem_size = cache.buf.size(); params.mem_buffer = cache.buf.data(); + params.no_alloc = false; cache.ctx = ggml_init(params); @@ -288,6 +296,26 @@ struct llama_context_params llama_context_default_params() { // model loading // +void * mmap_file(const char* fname) { +#if defined(MAP_FAILED) + // POSIX mmap + int fd = open(fname, O_RDONLY); + size_t len = lseek(fd, 0, SEEK_END); + void * mm_addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); + if (mm_addr == MAP_FAILED) { + perror("mmap failed"); + mm_addr = NULL; + } + close(fd); + return mm_addr; +#else + // TODO: windows support + (void)(fname); // suppress warnings + return NULL; +#endif +} + + static bool llama_model_load( const std::string & fname, llama_context & lctx, @@ -303,6 +331,7 @@ static bool llama_model_load( lctx.t_start_us = t_start_us; + // TODO: this could probably be smaller when using mmap std::vector f_buf(1024*1024); auto & model = lctx.model; @@ -449,39 +478,49 @@ static bool llama_model_load( } } + bool use_mmap = (n_parts == 1); + + // try to memory map the model file + void* mm_addr = NULL; + if (use_mmap) { + mm_addr = mmap_file(fname.c_str()); + if (mm_addr == NULL) { + use_mmap = false; + } + } + + + auto & ctx = model.ctx; size_t ctx_size = 0; - { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; - ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings + if (!use_mmap) { + ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings - ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm + ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm - ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output + ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output - ctx_size += 
n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo - ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm - ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1 - ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2 - ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3 - - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v + ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1 + ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2 + ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3 + } ctx_size += (5 + 10*n_layer)*256; // object overhead @@ -514,6 +553,7 @@ static bool llama_model_load( struct ggml_init_params params = { /*.mem_size =*/ lctx.model.buf.size(), /*.mem_buffer =*/ lctx.model.buf.data(), + /*.no_alloc =*/ use_mmap, }; model.ctx = ggml_init(params); @@ -595,7 +635,7 @@ static bool llama_model_load( fname_part += "." + std::to_string(i); } - fprintf(stderr, "%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str()); + fprintf(stderr, "%s: loading model part %d/%d from '%s'%s\n", __func__, i+1, n_parts, fname_part.c_str(), use_mmap ? 
" (memory mapped)" : ""); fin = std::ifstream(fname_part, std::ios::binary); fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size()); @@ -736,7 +776,14 @@ static bool llama_model_load( } if (part_id == 0) { - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); + if (mm_addr) { + off_t offset = fin.tellg(); + tensor->data = (char *) mm_addr + offset; + fin.seekg(ggml_nbytes(tensor), std::ios::cur); + } + else { + fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); + } } else { fin.seekg(ggml_nbytes(tensor), std::ios::cur); } @@ -849,6 +896,7 @@ static bool llama_eval_internal( struct ggml_init_params params = { /*.mem_size =*/ buf_compute.size(), /*.mem_buffer =*/ buf_compute.data(), + /*.no_alloc =*/ false, }; struct ggml_context * ctx0 = ggml_init(params); From 64bde3ffd4aef799acb790a3eedddbd0a0612108 Mon Sep 17 00:00:00 2001 From: Slaren <2141330+slaren@users.noreply.github.com> Date: Wed, 29 Mar 2023 05:38:57 +0200 Subject: [PATCH 52/76] Fix ggml_init_params in quantize --- examples/quantize/quantize.cpp | 2 +- llama.cpp | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index b444328ac..680757c6b 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -19,7 +19,7 @@ int main(int argc, char ** argv) { // needed to initialize f16 tables { - struct ggml_init_params params = { 0, NULL }; + struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); ggml_free(ctx); } diff --git a/llama.cpp b/llama.cpp index d7126f459..1adeee5f0 100644 --- a/llama.cpp +++ b/llama.cpp @@ -315,7 +315,6 @@ void * mmap_file(const char* fname) { #endif } - static bool llama_model_load( const std::string & fname, llama_context & lctx, @@ -489,8 +488,6 @@ static bool llama_model_load( } } - - auto & ctx = model.ctx; size_t ctx_size = 0; From d68c5dc4356c8f49e933df210f2ceca5002a8118 Mon Sep 17 00:00:00 2001 From: Slaren <2141330+slaren@users.noreply.github.com> Date: Wed, 29 Mar 2023 06:18:18 +0200 Subject: [PATCH 53/76] Make mmap_file static --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 1adeee5f0..096735c8f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -296,7 +296,7 @@ struct llama_context_params llama_context_default_params() { // model loading // -void * mmap_file(const char* fname) { +static void * mmap_file(const char* fname) { #if defined(MAP_FAILED) // POSIX mmap int fd = open(fname, O_RDONLY); From 276e5b781155e3bbe6834472c58f03dfe62efabe Mon Sep 17 00:00:00 2001 From: Slaren <2141330+slaren@users.noreply.github.com> Date: Wed, 29 Mar 2023 08:31:26 +0200 Subject: [PATCH 54/76] Unmap the file in llama_free --- llama.cpp | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/llama.cpp b/llama.cpp index 096735c8f..0c220e4ae 100644 --- a/llama.cpp +++ b/llama.cpp @@ -149,6 +149,10 @@ struct llama_model { // the model memory buffer std::vector buf; + // model memory mapped file + void * mm_addr; + size_t mm_length; + // tensors int n_loaded; std::unordered_map tensors; @@ -296,22 +300,32 @@ struct llama_context_params llama_context_default_params() { // model loading // -static void * mmap_file(const char* fname) { +static void mmap_file(const char* fname, void * &mm_addr, size_t &mm_length) { #if defined(MAP_FAILED) - // POSIX mmap + // POSIX int fd = open(fname, O_RDONLY); - size_t len = lseek(fd, 0, SEEK_END); - void * mm_addr = 
mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); + mm_length = lseek(fd, 0, SEEK_END); + mm_addr = mmap(NULL, mm_length, PROT_READ, MAP_SHARED, fd, 0); + close(fd); if (mm_addr == MAP_FAILED) { perror("mmap failed"); mm_addr = NULL; + mm_length = 0; } - close(fd); - return mm_addr; #else // TODO: windows support (void)(fname); // suppress warnings - return NULL; +#endif +} + +static void munmap_file(void * addr, size_t length) { +#if defined(MAP_FAILED) + // POSIX + munmap(addr, length); +#else + // TODO: windows support + (void)(addr); // suppress warnings + (void)(length); #endif } @@ -480,12 +494,15 @@ static bool llama_model_load( bool use_mmap = (n_parts == 1); // try to memory map the model file - void* mm_addr = NULL; + void * mm_addr = NULL; if (use_mmap) { - mm_addr = mmap_file(fname.c_str()); - if (mm_addr == NULL) { + mmap_file(fname.c_str(), model.mm_addr, model.mm_length); + if (model.mm_addr == NULL) { use_mmap = false; } + else { + mm_addr = model.mm_addr; + } } auto & ctx = model.ctx; @@ -1750,6 +1767,10 @@ void llama_free(struct llama_context * ctx) { ggml_free(ctx->model.ctx); } + if (ctx->model.mm_addr) { + munmap_file(ctx->model.mm_addr, ctx->model.mm_length); + } + delete ctx; } From ac184d514723902f9b05b688703b1be6e8dc65de Mon Sep 17 00:00:00 2001 From: Slaren <2141330+slaren@users.noreply.github.com> Date: Wed, 29 Mar 2023 08:53:14 +0200 Subject: [PATCH 55/76] Always initialize mm_addr and mm_length in llama_model --- llama.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llama.cpp b/llama.cpp index 0c220e4ae..aaf5f0ad5 100644 --- a/llama.cpp +++ b/llama.cpp @@ -150,8 +150,8 @@ struct llama_model { std::vector buf; // model memory mapped file - void * mm_addr; - size_t mm_length; + void * mm_addr = NULL; + size_t mm_length = 0; // tensors int n_loaded; From a017390358cdb23fffb30988dc84bb190d0403ca Mon Sep 17 00:00:00 2001 From: Slaren <2141330+slaren@users.noreply.github.com> Date: Wed, 29 Mar 2023 22:22:36 +0200 Subject: [PATCH 56/76] Initial windows support (untested) --- llama.cpp | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/llama.cpp b/llama.cpp index aaf5f0ad5..87633f972 100644 --- a/llama.cpp +++ b/llama.cpp @@ -12,11 +12,15 @@ #include #include -// headers for POSIX mmap +// mmap #if defined (__unix__) || defined (__APPLE__) # include # include # include +#elif defined(_WIN32) +# define WIN32_LEAN_AND_MEAN +# include +//#include #endif #define LLAMA_USE_SCRATCH @@ -312,8 +316,31 @@ static void mmap_file(const char* fname, void * &mm_addr, size_t &mm_length) { mm_addr = NULL; mm_length = 0; } +#elif defined(_WIN32) + mm_addr = NULL; + + HANDLE hFile = CreateFileA(filename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); + if (hFile == INVALID_HANDLE_VALUE) { + return; + } + + // not really necessary + LARGE_INTEGER fileSize; + GetFileSizeEx(hFile, &fileSize); + mm_length = fileSize; + + HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); + CloseHandle(hFile); + + if (hMapping == NULL) { + return; + } + + mm_addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); + CloseHandle(hMapping); #else - // TODO: windows support + mm_addr = NULL; + mm_length = 0; (void)(fname); // suppress warnings #endif } @@ -322,8 +349,9 @@ static void munmap_file(void * addr, size_t length) { #if defined(MAP_FAILED) // POSIX munmap(addr, length); +#elif defined(_WIN32) + UnmapViewOfFile(addr); #else - // TODO: windows support (void)(addr); // 
suppress warnings (void)(length); #endif From 78ca9838ee36660a776e97e3391b6fb5dcaacf7f Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Wed, 29 Mar 2023 13:51:37 -0700 Subject: [PATCH 57/76] Make loading weights 10-100x faster This is a breaking change that's going to give you three benefits: 1. Your inference commands should load 100x faster 2. You may be able to safely load models 2x larger 3. You can run many concurrent inference processes This was accomplished by changing the file format so we can mmap() weights directly into memory without having to read() or copy them thereby ensuring the kernel can make its file cache pages directly accessible to our inference processes; and secondly, that the file cache pages are much less likely to get evicted (which would force loads to hit disk) because they're no longer competing with memory pages that were needlessly created by gigabytes of standard i/o. The new file format supports single-file models like LLaMA 7b, and it also supports multi-file models like LLaMA 13B. Our Python tool now merges the foo.1, foo.2, etc. files back into a single file so that the C++ code which maps it doesn't need to reshape data every time. That's made llama.cpp so much simpler. Much of its load code has now been deleted. Furthermore, this change ensures that tensors are aligned properly on a 32-byte boundary. That opens the door to seeing if we can get additional performance gains on some microprocessors, by using ops that require memory alignment. Lastly note that both POSIX and the Windows platform are supported Fixes #91 --- .gitignore | 1 + convert-ggml-to-pth.py | 5 + convert-gptq-to-ggml.py | 5 + convert-pth-to-ggml.py | 195 ++++++++++++---- llama.cpp | 503 ++++++++++++++-------------------------- llama.h | 2 +- models/ggml-vocab.bin | Bin 432610 -> 432610 bytes 7 files changed, 336 insertions(+), 375 deletions(-) diff --git a/.gitignore b/.gitignore index 741c6b4ea..1c75d38d1 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ models/* /result /perplexity /embedding +/Pipfile arm_neon.h compile_commands.json diff --git a/convert-ggml-to-pth.py b/convert-ggml-to-pth.py index 20158c9ca..8ab17410d 100644 --- a/convert-ggml-to-pth.py +++ b/convert-ggml-to-pth.py @@ -84,6 +84,11 @@ def read_variables(fin): shape = shape[::-1] name = fin.read(name_length).decode("utf-8") + # ensure tensor data is aligned + tensor_data_offset = fin.tell() + tensor_data_offset = (tensor_data_offset + 31) & -32 + fin.seek(tensor_data_offset) + if ftype_cur == 2: # 4-bit quantized weights dtype = np.uint8 diff --git a/convert-gptq-to-ggml.py b/convert-gptq-to-ggml.py index 6c77808fc..860eb148b 100644 --- a/convert-gptq-to-ggml.py +++ b/convert-gptq-to-ggml.py @@ -72,6 +72,11 @@ def write_header(shape, dst_name, ftype_cur): fout.write(struct.pack("i" * len(shape), *shape[::-1])) fout.write(sname) + # ensure tensor data is aligned + tensor_data_offset = fout.tell() + tensor_data_offset = (tensor_data_offset + 31) & -32 + fout.seek(tensor_data_offset) + def convert_non_q4(src_name, dst_name): v = model[src_name] shape = v.shape diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py index d83f8a137..7d461157b 100644 --- a/convert-pth-to-ggml.py +++ b/convert-pth-to-ggml.py @@ -24,8 +24,57 @@ import torch from sentencepiece import SentencePieceProcessor -def parse_args(): +QK = 32 +GGML_TYPE_Q4_0 = 0 +GGML_TYPE_Q4_1 = 1 +GGML_TYPE_I8 = 2 +GGML_TYPE_I16 = 3 +GGML_TYPE_I32 = 4 +GGML_TYPE_F16 = 5 +GGML_TYPE_F32 = 6 + +WTYPES = { + 0: GGML_TYPE_F32, + 1: GGML_TYPE_F16, + 
2: GGML_TYPE_Q4_0, + 3: GGML_TYPE_Q4_1, +} + +GGML_BLCK_SIZE = { + GGML_TYPE_Q4_0: QK, + GGML_TYPE_Q4_1: QK, + GGML_TYPE_I8: 1, + GGML_TYPE_I16: 1, + GGML_TYPE_I32: 1, + GGML_TYPE_F16: 1, + GGML_TYPE_F32: 1, +} + +GGML_TYPE_SIZE = { + GGML_TYPE_Q4_0: 4 + QK/2, + GGML_TYPE_Q4_1: 4*2 + QK/2, + GGML_TYPE_I8: 1, + GGML_TYPE_I16: 2, + GGML_TYPE_I32: 4, + GGML_TYPE_F16: 2, + GGML_TYPE_F32: 4, +} + +def ggml_nelements(shape): + r = 1 + for i in shape: + r *= i + return r + +def ggml_nbytes(shape, ftype): + x = ggml_nelements(shape) + t = WTYPES[ftype] + x *= GGML_TYPE_SIZE[t] + x //= GGML_BLCK_SIZE[t] + return x + +def parse_args(): parser = argparse.ArgumentParser(description='Convert a LLaMA model checkpoint to a ggml compatible file') parser.add_argument('dir_model', help='directory containing the model checkpoint') parser.add_argument('ftype', help='file type (0: float32, 1: float16)', type=int, choices=[0, 1], default=1) @@ -33,7 +82,6 @@ def parse_args(): return parser.parse_args() def get_n_parts(dim): - mappings = {4096: 1, 5120: 2, 6656: 4, 8192: 8} n_parts = mappings.get(dim) if n_parts is None: @@ -44,30 +92,24 @@ def get_n_parts(dim): return n_parts def load_hparams_and_tokenizer(dir_model): - # `dir_model` is something like `models/7B` or `models/7B/`. # "tokenizer.model" is expected under model's parent dir. # When `dir_model` is a symlink, f"{dir_model}/../tokenizer.model" would not be found. # Let's use the model's parent dir directly. model_parent_dir = os.path.dirname(os.path.normpath(dir_model)) - fname_hparams = f"{dir_model}/params.json" fname_tokenizer = f"{model_parent_dir}/tokenizer.model" - with open(fname_hparams, "r") as f: hparams = json.load(f) print(hparams) - tokenizer = SentencePieceProcessor(fname_tokenizer) hparams.update({"vocab_size": tokenizer.vocab_size()}) - return hparams, tokenizer def write_header(fout, hparams, ftype): - keys = ["vocab_size", "dim", "multiple_of", "n_heads", "n_layers"] values = [ - 0x67676d66, # magic: ggmf in hex + 0x67676a74, # magic: ggjt in hex 1, # file version *[hparams[key] for key in keys], hparams["dim"] // hparams["n_heads"], # rot (obsolete) @@ -76,7 +118,6 @@ def write_header(fout, hparams, ftype): fout.write(struct.pack("i" * len(values), *values)) def write_tokens(fout, tokenizer): - for i in range(tokenizer.vocab_size()): if tokenizer.is_unknown(i): text = " \u2047 ".encode("utf-8") @@ -95,85 +136,141 @@ def write_tokens(fout, tokenizer): fout.write(text) fout.write(struct.pack("f", tokenizer.get_score(i))) -def process_and_write_variables(fout, model, ftype): - +def process_and_write_variables(fout, model, ftype, part_id, n_parts): for name, datao in model.items(): - if name.endswith("freqs"): continue - shape = datao.shape - - print(f"Processing variable: {name} with shape: {shape} and type: {datao.dtype}") - + # remove dimensions with a single element data = datao.numpy().squeeze() - n_dims = len(shape) + partshape = data.shape + n_dims = len(data.shape) + assert n_dims in (1, 2) - # default type is fp16 + print(f"Processing variable: {name} with shape: {partshape} and type: {datao.dtype}") + + # coerce single-dimensional tensors from float16 to float32 ftype_cur = 1 if ftype == 0 or n_dims == 1: print(" Converting to float32") data = data.astype(np.float32) ftype_cur = 0 + blck_size = GGML_BLCK_SIZE[WTYPES[ftype_cur]] + type_size = GGML_TYPE_SIZE[WTYPES[ftype_cur]] - # header + # determine dimension along which multipart tensor is sharded + # + # split_dim 0 regex: + # - output.* + # - layers.*.attention.wq.weight + # 
- layers.*.attention.wk.weight + # - layers.*.attention.wv.weight + # - layers.*.feed_forward.w1.weight + # - layers.*.feed_forward.w3.weight + # + # split_dim 1 regex: + # - tok_embeddings.* + # - layers.*.attention.wo.weight + # - layers.*.feed_forward.w2.weight + # + if n_dims > 1: + split_dim = 1 + if "tok_embeddings" in name: + split_dim = 1 + elif "layers" in name: + if "attention.wo.weight" in name: + split_dim = 1 + elif "feed_forward.w2.weight" in name: + split_dim = 1 + else: + split_dim = 0 + elif "output" in name: + split_dim = 0 + + # output tensor header + fullshape = list(partshape) + if n_dims > 1: + fullshape[split_dim] *= n_parts sname = name.encode('utf-8') - fout.write(struct.pack("iii", len(data.shape), len(sname), ftype_cur)) - for dim in reversed(data.shape): + fout.write(struct.pack("iii", n_dims, len(sname), ftype_cur)) + for dim in reversed(fullshape): fout.write(struct.pack("i", dim)) fout.write(sname) - # data output to file - data.tofile(fout) + # ensure tensor data is aligned + tensor_data_offset = fout.tell() + while tensor_data_offset % QK != 0: + fout.write(struct.pack("B", 0)) + tensor_data_offset += 1 + + # output unified mappable tensor data + if n_dims == 1 or n_parts == 1: + # copy tensor which we thankfully received in one piece + if part_id == 0: + data.tofile(fout) + elif split_dim == 0: + # reassemble multifile tensor containing some of the rows + rows_per_chunk = partshape[0] + current_row = part_id * rows_per_chunk + bytes_per_row = fullshape[1] // blck_size * type_size + offset = current_row * bytes_per_row + fout.seek(tensor_data_offset + offset) + data.tofile(fout) + elif split_dim == 1: + # reassemble multifile tensor containing some of the cols + cols_per_chunk = partshape[1] + current_col = part_id * cols_per_chunk + bytes_per_row = fullshape[1] // blck_size * type_size + offset_current_col = current_col // blck_size * type_size + for row in range(partshape[0]): + offset_row = row * bytes_per_row + offset = offset_row + offset_current_col + fout.seek(tensor_data_offset + offset) + data[row].tofile(fout) + + # advance file position to next tensor + fout.seek(tensor_data_offset + ggml_nbytes(fullshape, ftype_cur)) def main(): - args = parse_args() dir_model = args.dir_model ftype = args.ftype ftype_str = ["f32", "f16"] - hparams, tokenizer = load_hparams_and_tokenizer(dir_model) print(args) # if only writing vocab to file if args.vocab_only: - fname_model = f"{dir_model}/consolidated.00.pth" fname_out = f"{dir_model}/ggml-vocab.bin" - print(f"Extracting only the vocab from '{fname_model}'\n") - - + model = torch.load(fname_model, map_location="cpu") with open(fname_out, "wb") as fout: write_header(fout, hparams, ftype) write_tokens(fout, tokenizer) - - + del model print(f"Done. 
Output file: {fname_out}\n") - return n_parts = get_n_parts(hparams["dim"]) + fname_out = f"{dir_model}/ggml-model-{ftype_str[ftype]}.bin" - for p in range(n_parts): + # we output a single file for ggml + with open(fname_out, "wb") as fout: + write_header(fout, hparams, ftype) + write_tokens(fout, tokenizer) + offset_of_tensors = fout.tell() + # the tensors we load could be split across multiple files + for part_id in range(n_parts): + fout.seek(offset_of_tensors) + print(f"Processing part {part_id+1} of {n_parts}\n") + fname_model = f"{dir_model}/consolidated.0{part_id}.pth" + model = torch.load(fname_model, map_location="cpu") + process_and_write_variables(fout, model, ftype, part_id, n_parts) + del model - print(f"Processing part {p+1} of {n_parts}\n") - - fname_model = f"{dir_model}/consolidated.0{p}.pth" - fname_out = f"{dir_model}/ggml-model-{ftype_str[ftype]}.bin{'' if p == 0 else '.' + str(p)}" - - model = torch.load(fname_model, map_location="cpu") - - with open(fname_out, "wb") as fout: - write_header(fout, hparams, ftype) - write_tokens(fout, tokenizer) - process_and_write_variables(fout, model, ftype) - - del model - - print(f"Done. Output file: {fname_out}, (part {p})\n") + print(f"Done. Output file: {fname_out}\n") if __name__ == "__main__": main() diff --git a/llama.cpp b/llama.cpp index 87633f972..b00e06523 100644 --- a/llama.cpp +++ b/llama.cpp @@ -12,17 +12,19 @@ #include #include -// mmap -#if defined (__unix__) || defined (__APPLE__) -# include -# include -# include -#elif defined(_WIN32) -# define WIN32_LEAN_AND_MEAN -# include -//#include +#if defined(_WIN32) && !defined(_POSIX_MAPPED_FILES) +#define WIN32_LEAN_AND_MEAN +#include +#else +#include +#include +#include +#include #endif +#define Min(X, Y) ((Y) > (X) ? (X) : (Y)) +#define Max(X, Y) ((Y) < (X) ? 
(X) : (Y)) + #define LLAMA_USE_SCRATCH #define LLAMA_MAX_SCRATCH_BUFFERS 16 @@ -155,7 +157,7 @@ struct llama_model { // model memory mapped file void * mm_addr = NULL; - size_t mm_length = 0; + uint64_t mm_length = 0; // tensors int n_loaded; @@ -180,6 +182,7 @@ struct llama_context { int64_t t_load_us = 0; int64_t t_start_us = 0; + bool has_evaluated_once = false; int64_t t_sample_us = 0; int64_t t_eval_us = 0; @@ -221,7 +224,7 @@ struct llama_context { } if (buf_last >= 0) { - buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size); + buf_max_size[buf_last] = Max(buf_max_size[buf_last], last_size); } buf_last = i; @@ -304,59 +307,57 @@ struct llama_context_params llama_context_default_params() { // model loading // -static void mmap_file(const char* fname, void * &mm_addr, size_t &mm_length) { -#if defined(MAP_FAILED) - // POSIX - int fd = open(fname, O_RDONLY); - mm_length = lseek(fd, 0, SEEK_END); - mm_addr = mmap(NULL, mm_length, PROT_READ, MAP_SHARED, fd, 0); - close(fd); - if (mm_addr == MAP_FAILED) { - perror("mmap failed"); - mm_addr = NULL; - mm_length = 0; - } -#elif defined(_WIN32) - mm_addr = NULL; - - HANDLE hFile = CreateFileA(filename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); - if (hFile == INVALID_HANDLE_VALUE) { - return; - } - - // not really necessary +static void *mmap_file(const char *fname, uint64_t *mm_length) { +#if defined(_WIN32) && !defined(_POSIX_MAPPED_FILES) + HANDLE hFile = CreateFileA(fname, + GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED, + NULL); + if (hFile == INVALID_HANDLE_VALUE) return 0; LARGE_INTEGER fileSize; + fileSize.QuadPart = -1; GetFileSizeEx(hFile, &fileSize); - mm_length = fileSize; - + int64_t length = fileSize.QuadPart; HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); CloseHandle(hFile); - - if (hMapping == NULL) { - return; - } - - mm_addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); + if (!hMapping) return 0; + void *addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); CloseHandle(hMapping); + if (!addr) return 0; #else - mm_addr = NULL; - mm_length = 0; - (void)(fname); // suppress warnings + int fd = open(fname, O_RDONLY); + if (fd == -1) return 0; + int64_t length = lseek(fd, 0, SEEK_END); + void *addr = mmap(NULL, length, PROT_READ, MAP_SHARED, fd, 0); + close(fd); + if (addr == MAP_FAILED) return 0; #endif + *mm_length = length; + return addr; } static void munmap_file(void * addr, size_t length) { -#if defined(MAP_FAILED) - // POSIX - munmap(addr, length); -#elif defined(_WIN32) +#if defined(_WIN32) && !defined(_POSIX_MAPPED_FILES) UnmapViewOfFile(addr); #else - (void)(addr); // suppress warnings - (void)(length); + munmap(addr, length); #endif } +static bool report_bad_magic(const char *path) { + fprintf(stderr, + "%s: invalid model file (bad magic)\n" + "you most likely need to regenerate your ggml files\n" + "the benefit is you'll get 10-100x faster load times\n" + "see https://github.com/ggerganov/llama.cpp/issues/91\n" + "use convert-pth-to-ggml.py on your llama model files\n", + path); + return false; +} + static bool llama_model_load( const std::string & fname, llama_context & lctx, @@ -368,23 +369,24 @@ static bool llama_model_load( void *progress_callback_user_data) { fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str()); - const int64_t t_start_us = ggml_time_us(); - - 
lctx.t_start_us = t_start_us; - - // TODO: this could probably be smaller when using mmap - std::vector f_buf(1024*1024); + lctx.t_start_us = ggml_time_us(); auto & model = lctx.model; auto & vocab = lctx.vocab; auto fin = std::ifstream(fname, std::ios::binary); - fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size()); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } + std::vector f_buf(1024*1024); + fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size()); + + fin.seekg(0, fin.end); + const size_t file_size = fin.tellg(); + fin.seekg(0); + // verify magic { uint32_t magic; @@ -395,8 +397,7 @@ static bool llama_model_load( return false; } if (magic != LLAMA_FILE_MAGIC) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); - return false; + return report_bad_magic(fname.c_str()); } uint32_t format_version; @@ -519,54 +520,24 @@ static bool llama_model_load( } } - bool use_mmap = (n_parts == 1); - - // try to memory map the model file - void * mm_addr = NULL; - if (use_mmap) { - mmap_file(fname.c_str(), model.mm_addr, model.mm_length); - if (model.mm_addr == NULL) { - use_mmap = false; - } - else { - mm_addr = model.mm_addr; - } + // map model into memory + char *mm_addr = NULL; + model.mm_addr = mmap_file(fname.c_str(), &model.mm_length); + if (model.mm_addr == NULL) { + fprintf(stderr, "%s: failed to mmap '%s'\n", __func__, fname.c_str()); + return false; } + mm_addr = (char *)model.mm_addr; + fprintf(stderr, "%s: ggml map size = %6.2f MB\n", __func__, model.mm_length/(1024.0*1024.0)); auto & ctx = model.ctx; size_t ctx_size = 0; { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; + const auto &hparams = model.hparams; const int n_layer = hparams.n_layer; - const int n_vocab = hparams.n_vocab; - - if (!use_mmap) { - ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings - - ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm - - ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output - - ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm - - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo - - ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm - - ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1 - ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2 - ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3 - } - ctx_size += (5 + 10*n_layer)*256; // object overhead - - fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); + fprintf(stderr, "%s: ggml ctx size = %6.2f KB\n", __func__, ctx_size/1024.0); } // print memory requirements @@ -576,6 +547,7 @@ static bool llama_model_load( // this is the total memory required to run the inference const size_t mem_required = ctx_size + + model.mm_length + MEM_REQ_SCRATCH0.at(model.type) + MEM_REQ_SCRATCH1.at(model.type) + MEM_REQ_EVAL.at (model.type); @@ -595,7 +567,7 @@ static bool llama_model_load( struct ggml_init_params params = { /*.mem_size =*/ lctx.model.buf.size(), /*.mem_buffer =*/ lctx.model.buf.data(), - /*.no_alloc =*/ use_mmap, + /*.no_alloc =*/ true, }; model.ctx = ggml_init(params); @@ -658,241 +630,106 @@ static bool llama_model_load( } } - const size_t file_offset = 
fin.tellg(); - - fin.close(); - std::vector tmp; if (progress_callback) { progress_callback(0.0, progress_callback_user_data); } - for (int i = 0; i < n_parts; ++i) { - const int part_id = i; - //const int part_id = n_parts - i - 1; + fprintf(stderr, "%s: loading tensors from '%s'\n", __func__, fname.c_str()); - std::string fname_part = fname; - if (i > 0) { - fname_part += "." + std::to_string(i); - } + // load weights + { + size_t total_size = 0; + model.n_loaded = 0; - fprintf(stderr, "%s: loading model part %d/%d from '%s'%s\n", __func__, i+1, n_parts, fname_part.c_str(), use_mmap ? " (memory mapped)" : ""); + while (true) { + int32_t n_dims; + int32_t length; + int32_t ftype; - fin = std::ifstream(fname_part, std::ios::binary); - fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size()); + fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + fin.read(reinterpret_cast(&length), sizeof(length)); + fin.read(reinterpret_cast(&ftype), sizeof(ftype)); - fin.seekg(0, fin.end); - const size_t file_size = fin.tellg(); - - fin.seekg(file_offset); - - // load weights - { - size_t total_size = 0; - - model.n_loaded = 0; - - fprintf(stderr, "%s: ", __func__); - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ftype; - - fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - fin.read(reinterpret_cast(&length), sizeof(length)); - fin.read(reinterpret_cast(&ftype), sizeof(ftype)); - - if (fin.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - fin.read(&name[0], length); - - if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); - return false; - } - - // split_type = 0: split by columns - // split_type = 1: split by rows - int split_type = 0; - - // split_type = 0: - // regex: - // - tok_embeddings.* - // - layers.*.attention.wo.weight - // - layers.*.feed_forward.w2.weight - - // split_type = 1: - // regex: - // - output.* - // - layers.*.attention.wq.weight - // - layers.*.attention.wk.weight - // - layers.*.attention.wv.weight - // - layers.*.feed_forward.w1.weight - // - layers.*.feed_forward.w3.weight - if (name.find("tok_embeddings") != std::string::npos) { - split_type = 0; - } else if (name.find("layers") != std::string::npos) { - if (name.find("attention.wo.weight") != std::string::npos) { - split_type = 0; - } else if (name.find("feed_forward.w2.weight") != std::string::npos) { - split_type = 0; - } else { - split_type = 1; - } - } else if (name.find("output") != std::string::npos) { - split_type = 1; - } - - auto tensor = model.tensors[name.data()]; - - if (n_dims == 1) { - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); - return false; - } - } else { - if (ggml_nelements(tensor)/n_parts != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); - return false; - } - } - - if (n_dims == 1) { - if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); - return false; - } - } else { - if (split_type == 0) { - if (tensor->ne[0]/n_parts != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong 
shape in model file: got [%d, %d], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0]/n_parts, tensor->ne[1], ne[0], ne[1]); - return false; - } - } else { - if (tensor->ne[0] != ne[0] || tensor->ne[1]/n_parts != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0], tensor->ne[1]/n_parts, ne[0], ne[1]); - return false; - } - } - } - - if (0) { - static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; - fprintf(stderr, "%24s - [%5d, %5d], type = %6s, split = %d\n", name.data(), ne[0], ne[1], ftype_str[ftype], split_type); - } - - size_t bpe = 0; - - switch (ftype) { - case 0: bpe = ggml_type_size(GGML_TYPE_F32); break; - case 1: bpe = ggml_type_size(GGML_TYPE_F16); break; - case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break; - case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break; - default: - { - fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); - return false; - } - }; - - if (n_dims == 1 || n_parts == 1) { - if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); - return false; - } - - if (part_id == 0) { - if (mm_addr) { - off_t offset = fin.tellg(); - tensor->data = (char *) mm_addr + offset; - fin.seekg(ggml_nbytes(tensor), std::ios::cur); - } - else { - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); - } - } else { - fin.seekg(ggml_nbytes(tensor), std::ios::cur); - } - - total_size += ggml_nbytes(tensor); - } else { - if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)/n_parts) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor)/n_parts, nelements*bpe); - return false; - } - - if (split_type == 0) { - const int np0 = ne[0]; - - const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type); - assert(row_size == tensor->nb[1]); - - for (int i1 = 0; i1 < ne[1]; ++i1) { - const size_t offset_row = i1*row_size; - const size_t offset = offset_row + ((part_id*np0)/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type); - fin.read(reinterpret_cast(tensor->data) + offset, row_size/n_parts); - } - } else { - const int np1 = ne[1]; - - const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type); - - for (int i1 = 0; i1 < ne[1]; ++i1) { - const size_t offset_row = (i1 + part_id*np1)*row_size; - fin.read(reinterpret_cast(tensor->data) + offset_row, row_size); - } - } - - total_size += ggml_nbytes(tensor)/n_parts; - } - - //fprintf(stderr, "%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? 
"float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); - model.n_loaded++; - - // progress - if (progress_callback) { - float current_file_progress = float(size_t(fin.tellg()) - file_offset) / float(file_size - file_offset); - float current_progress = (float(i) + current_file_progress) / float(n_parts); - progress_callback(current_progress, progress_callback_user_data); - } - if (model.n_loaded % 8 == 0) { - fprintf(stderr, "."); - fflush(stderr); - } + if (fin.eof()) { + break; } - fprintf(stderr, " done\n"); + int32_t nelements = 1; + int32_t ne[2] = { 1, 1 }; + for (int i = 0; i < n_dims; ++i) { + fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); + nelements *= ne[i]; + } - fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, model.n_loaded); - if (model.n_loaded == 0) { - fprintf(stderr, "%s: WARN no tensors loaded from model file - assuming empty model for testing\n", __func__); - } else if (model.n_loaded != (int) model.tensors.size()) { - fprintf(stderr, "%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), model.n_loaded); + std::string name(length, 0); + fin.read(&name[0], length); + + if (model.tensors.find(name.data()) == model.tensors.end()) { + fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); return false; } + + auto tensor = model.tensors[name.data()]; + + if (ggml_nelements(tensor) != nelements) { + fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); + return false; + } + if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { + fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", + __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); + return false; + } + if (0) { + static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; + fprintf(stderr, "%24s - [%5d, %5d], type = %6s\n", name.data(), ne[0], ne[1], ftype_str[ftype]); + } + + switch (ftype) { + case 0: // f32 + case 1: // f16 + break; + case 2: // q4_0 + case 3: // q4_1 + assert(ne[0] % 64 == 0); + break; + default: + fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); + return false; + }; + + // load the tensor data into memory without copying or reading it + size_t offset = fin.tellg(); + size_t tensor_data_size = ggml_nbytes(tensor); + offset = (offset + 31) & -32; + tensor->data = mm_addr + offset; + fin.seekg(offset + tensor_data_size); + total_size += tensor_data_size; + model.n_loaded++; + + // progress + if (progress_callback) { + double current_progress = size_t(fin.tellg()) / double(file_size); + progress_callback(current_progress, progress_callback_user_data); + } } fin.close(); + + fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, model.n_loaded); + if (model.n_loaded == 0) { + fprintf(stderr, "%s: WARN no tensors loaded from model file - assuming empty model for testing\n", __func__); + } else if (model.n_loaded != (int) model.tensors.size()) { + fprintf(stderr, "%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), model.n_loaded); + return false; + } } - lctx.t_load_us = ggml_time_us() - t_start_us; + // loading time will be recalculate after the first eval, so + // we take page faults deferred by mmap() into consideration + lctx.t_load_us = ggml_time_us() - lctx.t_start_us; if (progress_callback) { progress_callback(1.0, 
progress_callback_user_data); @@ -1216,7 +1053,7 @@ struct llama_tokenizer { size_t offs = 0; while (offs < text.size()) { llama_sp_symbol sym; - size_t char_len = std::min(text.size() - offs, utf8_len(text[offs])); + size_t char_len = Min(text.size() - offs, utf8_len(text[offs])); sym.text = text.c_str() + offs; sym.n = char_len; offs += char_len; @@ -1381,7 +1218,7 @@ static llama_vocab::id llama_sample_top_p_top_k( float maxl = -std::numeric_limits::infinity(); for (const auto & kv : logits_id) { - maxl = std::max(maxl, kv.first); + maxl = Max(maxl, kv.first); } // compute probs for the top k tokens @@ -1475,8 +1312,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s return false; } if (magic != LLAMA_FILE_MAGIC) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); - return false; + return report_bad_magic(fname_inp.c_str()); } fout.write((char *) &magic, sizeof(magic)); @@ -1542,8 +1378,8 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s fout.write((char *) &len, sizeof(len)); word.resize(len); - finp.read ((char *) word.data(), len); - fout.write((char *) word.data(), len); + finp.read ((char *) &word[0], len); + fout.write((char *) &word[0], len); float score; finp.read ((char *) &score, sizeof(score)); @@ -1593,6 +1429,13 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s std::string name(length, 0); finp.read (&name[0], length); + { + // ensure tensor data is aligned + uint64_t offset = finp.tellg(); + offset = (offset + 31) & -32; + finp.seekg(offset); + } + { static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]); @@ -1648,6 +1491,13 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s } fout.write(&name[0], length); + { + // ensure tensor data is aligned + uint64_t offset = fout.tellp(); + offset = (offset + 31) & -32; + fout.seekp(offset); + } + if (quantize) { printf("quantizing .. 
"); work.resize(nelements); // for quantization @@ -1824,7 +1674,11 @@ int llama_eval( fprintf(stderr, "%s: failed to eval\n", __func__); return 1; } - + // get a more accurate load time, upon first eval + if (!ctx->has_evaluated_once) { + ctx->t_load_us = ggml_time_us() - ctx->t_start_us; + ctx->has_evaluated_once = true; + } return 0; } @@ -1917,9 +1771,9 @@ llama_token llama_sample_top_p_top_k( void llama_print_timings(struct llama_context * ctx) { const int64_t t_end_us = ggml_time_us(); - const int32_t n_sample = std::max(1, ctx->n_sample); - const int32_t n_eval = std::max(1, ctx->n_eval); - const int32_t n_p_eval = std::max(1, ctx->n_p_eval); + const int32_t n_sample = Max(1, ctx->n_sample); + const int32_t n_eval = Max(1, ctx->n_eval); + const int32_t n_p_eval = Max(1, ctx->n_p_eval); fprintf(stderr, "\n"); fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0); @@ -1931,7 +1785,6 @@ void llama_print_timings(struct llama_context * ctx) { void llama_reset_timings(struct llama_context * ctx) { ctx->t_start_us = ggml_time_us(); - ctx->t_sample_us = ctx->n_sample = 0; ctx->t_eval_us = ctx->n_eval = 0; ctx->t_p_eval_us = ctx->n_p_eval = 0; diff --git a/llama.h b/llama.h index 3368de3e0..258de5a94 100644 --- a/llama.h +++ b/llama.h @@ -20,7 +20,7 @@ #endif #define LLAMA_FILE_VERSION 1 -#define LLAMA_FILE_MAGIC 0x67676d66 // 'ggmf' in hex +#define LLAMA_FILE_MAGIC 0x67676a74 // 'ggjt' in hex #define LLAMA_FILE_MAGIC_UNVERSIONED 0x67676d6c // pre-versioned files #ifdef __cplusplus diff --git a/models/ggml-vocab.bin b/models/ggml-vocab.bin index 3651f708e80eaa74f2f0004bbcfd8744b15e48e0..38f63493a97a7e85ef04a21697f7d2989156e5e4 100644 GIT binary patch delta 31 lcmaE~S?bYdDW;OFMy6IK##SaE$=u4s(#piTm5J@aOaQF#390}9 delta 31 lcmaE~S?bYdDW Date: Thu, 30 Mar 2023 01:53:36 -0700 Subject: [PATCH 58/76] Ensure --mlock works properly with mmap() support --- ggml.c | 39 +++++++++++++++++++++++++-------------- ggml.h | 6 +++++- llama.cpp | 5 ++++- 3 files changed, 34 insertions(+), 16 deletions(-) diff --git a/ggml.c b/ggml.c index 4ea715957..25fa72632 100644 --- a/ggml.c +++ b/ggml.c @@ -2884,36 +2884,47 @@ size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) return result; } +#ifdef __APPLE__ +#define MLOCK_SUGGESTION \ + "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ + "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n" +#else +#define MLOCK_SUGGESTION \ + "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" +#endif + bool ggml_mlock_supported(void) { return GGML_MLOCK_SUPPORT; } +bool ggml_mlock( + struct ggml_context * ctx, + const void *opt_extra_addr, + size_t opt_extra_len, + char **err_p) { + // TODO: Use SetProcessWorkingSetSize() + VirtualLock() on WIN32 #if GGML_MLOCK_SUPPORT -#ifdef __APPLE__ - #define MLOCK_SUGGESTION "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or\n" \ - "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l)." -#else - #define MLOCK_SUGGESTION "Try increasing RLIMIT_MLOCK (ulimit -l)." 
-#endif -bool ggml_mlock(struct ggml_context * ctx, char ** err_p) { if (ctx->mem_buffer_mlocked) { return true; } - if (mlock(ctx->mem_buffer, ctx->mem_size)) { - int ret = asprintf(err_p, "failed to mlock %zu-byte buffer: %s\n" MLOCK_SUGGESTION, - ctx->mem_size, strerror(errno)); - GGML_ASSERT(ret >= 0); + if (mlock(ctx->mem_buffer, ctx->mem_size) || + (opt_extra_len && + mlock(opt_extra_addr, opt_extra_len))) { + if ((*err_p = malloc(1024))) { + snprintf(*err_p, 1024, + "failed to mlock %zu-byte buffer: %s\n" MLOCK_SUGGESTION, + ctx->mem_size + opt_extra_len, + strerror(errno)); + } return false; } ctx->mem_buffer_mlocked = true; return true; -} #else // GGML_MLOCK_SUPPORT -bool ggml_mlock(struct ggml_context * ctx, char ** err_p) { *err_p = strdup("can't mlock because it's not supported on this system"); return false; -} #endif // GGML_MLOCK_SUPPORT +} //////////////////////////////////////////////////////////////////////////////// diff --git a/ggml.h b/ggml.h index 058dfe230..f7791ed11 100644 --- a/ggml.h +++ b/ggml.h @@ -345,7 +345,11 @@ size_t ggml_used_mem(const struct ggml_context * ctx); size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch); bool ggml_mlock_supported(void); -bool ggml_mlock(struct ggml_context * ctx, char ** err_p); +bool ggml_mlock( + struct ggml_context * ctx, + const void *opt_extra_addr, + size_t opt_extra_len, + char **err_p); struct ggml_tensor * ggml_new_tensor( struct ggml_context * ctx, diff --git a/llama.cpp b/llama.cpp index b00e06523..28e885cef 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1595,7 +1595,10 @@ struct llama_context * llama_init_from_file( if (params.use_mlock) { char *err; - if (!ggml_mlock(ctx->model.ctx, &err)) { + if (!ggml_mlock(ctx->model.ctx, + ctx->model.mm_addr, + ctx->model.mm_length, + &err)) { fprintf(stderr, "%s\n", err); free(err); llama_free(ctx); From ee0c40dd6de8c3c658ae43199939ef40bb1cf408 Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Thu, 30 Mar 2023 05:42:56 -0700 Subject: [PATCH 59/76] Introduce GGML migration tool for new file format If you deleted your old Meta LLaMA .pth files, then the migrate-ggml-2023-03-30-pr613.py script will allow you to convert your old ggml files into the new mmap()'able format. See #613 --- convert-pth-to-ggml.py | 8 +- llama.cpp | 19 +- migrate-ggml-2023-03-30-pr613.py | 313 +++++++++++++++++++++++++++++++ 3 files changed, 326 insertions(+), 14 deletions(-) create mode 100644 migrate-ggml-2023-03-30-pr613.py diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py index 7d461157b..df42e76bd 100644 --- a/convert-pth-to-ggml.py +++ b/convert-pth-to-ggml.py @@ -1,4 +1,4 @@ -# Convert a LLaMA model checkpoint to a ggml compatible file +# Convert a LLaMA model checkpoint to a ggjt compatible file # # Load the model using Torch # Iterate over all variables and write them to a binary file. @@ -52,8 +52,8 @@ GGML_BLCK_SIZE = { } GGML_TYPE_SIZE = { - GGML_TYPE_Q4_0: 4 + QK/2, - GGML_TYPE_Q4_1: 4*2 + QK/2, + GGML_TYPE_Q4_0: 4 + QK//2, + GGML_TYPE_Q4_1: 4*2 + QK//2, GGML_TYPE_I8: 1, GGML_TYPE_I16: 2, GGML_TYPE_I32: 4, @@ -245,11 +245,9 @@ def main(): fname_model = f"{dir_model}/consolidated.00.pth" fname_out = f"{dir_model}/ggml-vocab.bin" print(f"Extracting only the vocab from '{fname_model}'\n") - model = torch.load(fname_model, map_location="cpu") with open(fname_out, "wb") as fout: write_header(fout, hparams, ftype) write_tokens(fout, tokenizer) - del model print(f"Done. 
Output file: {fname_out}\n") return diff --git a/llama.cpp b/llama.cpp index 28e885cef..bed24207d 100644 --- a/llama.cpp +++ b/llama.cpp @@ -347,14 +347,15 @@ static void munmap_file(void * addr, size_t length) { #endif } -static bool report_bad_magic(const char *path) { +static bool report_bad_magic(const char *path, uint32_t got, uint32_t want) { fprintf(stderr, - "%s: invalid model file (bad magic)\n" - "you most likely need to regenerate your ggml files\n" - "the benefit is you'll get 10-100x faster load times\n" - "see https://github.com/ggerganov/llama.cpp/issues/91\n" - "use convert-pth-to-ggml.py on your llama model files\n", - path); + "%s: invalid model file (bad magic [got %#x want %#x])\n" + "\tyou most likely need to regenerate your ggml files\n" + "\tthe benefit is you'll get 10-100x faster load times\n" + "\tsee https://github.com/ggerganov/llama.cpp/issues/91\n" + "\tuse convert-pth-to-ggml.py to regenerate from original pth\n" + "\tuse migrate-ggml-2023-03-30-pr613.py if you deleted originals\n", + path, got, want); return false; } @@ -397,7 +398,7 @@ static bool llama_model_load( return false; } if (magic != LLAMA_FILE_MAGIC) { - return report_bad_magic(fname.c_str()); + return report_bad_magic(fname.c_str(), magic, LLAMA_FILE_MAGIC); } uint32_t format_version; @@ -1312,7 +1313,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s return false; } if (magic != LLAMA_FILE_MAGIC) { - return report_bad_magic(fname_inp.c_str()); + return report_bad_magic(fname_inp.c_str(), magic, LLAMA_FILE_MAGIC); } fout.write((char *) &magic, sizeof(magic)); diff --git a/migrate-ggml-2023-03-30-pr613.py b/migrate-ggml-2023-03-30-pr613.py new file mode 100644 index 000000000..5596f6c55 --- /dev/null +++ b/migrate-ggml-2023-03-30-pr613.py @@ -0,0 +1,313 @@ +# Migrate ggml file(s) with ggmf magic to ggml file with ggjt magic +# +# We caused a breaking change to the file format on 2023-03-30 in: +# https://github.com/ggerganov/llama.cpp/pull/613 +# +# (1) If you still have the Meta LLaMA .pth files, then close this +# file now; you can just run `convert-pth-to-ggml.py` again to +# migrate to the new format. The tool is easier to use too. It +# isn't necessary anymore to manage split output files because +# the new format always combines things into a single file. +# +# (2) If you deleted the Meta LLaMA .pth files due to save on disk +# space, then this tool is intended to help you. Please check +# out the instructions below. +# +# USAGE +# +# python migrate-ggml-2023-03-30-pr613.py INPUT OUTPUT +# +# PREREQUISITES +# +# pip install numpy +# cd llama.cpp +# make -j4 +# +# EXAMPLE (7B MODEL) +# +# # you can replace all the 'f16' with 'q4_0' if you're using quantized weights +# python migrate-ggml-2023-03-30-pr613.py models/7B/ggml-model-f16.bin models/7B/ggml-model-f16-ggjt.bin +# +# # check that it works +# ./main -m models/7B/ggml-model-f16-ggjt.bin -p 'Question: Do you love me?' +# +# # you can delete the old files +# rm -f models/7B/ggml-model-f16.bin +# mv models/7B/ggml-model-f16-ggjt.bin models/7B/ggml-model-f16.bin +# +# EXAMPLE (13B MODEL) +# +# # you can replace all the 'f16' with 'q4_0' if you're using quantized weights +# python migrate-ggml-2023-03-30-pr613.py models/13B/ggml-model-f16.bin models/13B/ggml-model-f16-ggjt.bin +# +# # check that it works +# ./main -m models/13B/ggml-model-f16-ggjt.bin -p 'Question: Do you love me?' 
+# +# # you can delete the old files +# rm -f models/13B/ggml-model-f16.bin* +# mv models/13B/ggml-model-f16-ggjt.bin models/13B/ggml-model-f16.bin +# + +import argparse +import os +import sys +import json +import struct +import numpy as np + +QK = 32 + +GGML_TYPE_Q4_0 = 0 +GGML_TYPE_Q4_1 = 1 +GGML_TYPE_I8 = 2 +GGML_TYPE_I16 = 3 +GGML_TYPE_I32 = 4 +GGML_TYPE_F16 = 5 +GGML_TYPE_F32 = 6 + +WTYPE_NAMES = { + 0: "F32", + 1: "F16", + 2: "Q4_0", + 3: "Q4_1", +} + +WTYPES = { + 0: GGML_TYPE_F32, + 1: GGML_TYPE_F16, + 2: GGML_TYPE_Q4_0, + 3: GGML_TYPE_Q4_1, +} + +GGML_BLCK_SIZE = { + GGML_TYPE_Q4_0: QK, + GGML_TYPE_Q4_1: QK, + GGML_TYPE_I8: 1, + GGML_TYPE_I16: 1, + GGML_TYPE_I32: 1, + GGML_TYPE_F16: 1, + GGML_TYPE_F32: 1, +} + +GGML_TYPE_SIZE = { + GGML_TYPE_Q4_0: 4 + QK//2, + GGML_TYPE_Q4_1: 4*2 + QK//2, + GGML_TYPE_I8: 1, + GGML_TYPE_I16: 2, + GGML_TYPE_I32: 4, + GGML_TYPE_F16: 2, + GGML_TYPE_F32: 4, +} + +HPARAMS = [ + 'magic', # int32 + 'version', # int32 + 'n_vocab', # int32 + 'n_embd', # int32 + 'n_mult', # int32 + 'n_head', # int32 + 'n_layer', # int32 + 'n_rot', # int32 + 'f16', # int32 +] + +def read_hparams(fin): + struct_fmt = "i" * len(HPARAMS) + struct_size = struct.calcsize(struct_fmt) + buf = fin.read(struct_size) + ints = struct.unpack(struct_fmt, buf) + hparams = dict(zip(HPARAMS, ints)) + return hparams + +def write_hparams(fout, hparams): + struct_fmt = "i" * len(HPARAMS) + struct_size = struct.calcsize(struct_fmt) + ints = [hparams[h] for h in HPARAMS] + fout.write(struct.pack(struct_fmt, *ints)) + +def read_tokens(fin, hparams): + tokens = [] + for i in range(hparams['n_vocab']): + len_b = fin.read(4) + (length,) = struct.unpack("i", len_b) + word = fin.read(length) + score_b = fin.read(4) + (score,) = struct.unpack("f", score_b) + tokens.append((word, score)) + return tokens + +def write_tokens(fout, tokens): + for word, score in tokens: + fout.write(struct.pack("i", len(word))) + fout.write(word) + fout.write(struct.pack("f", score)) + +def ggml_nelements(shape): + r = 1 + for i in shape: + r *= i + return r + +def ggml_nbytes(shape, ftype): + x = ggml_nelements(shape) + t = WTYPES[ftype] + x *= GGML_TYPE_SIZE[t] + x //= GGML_BLCK_SIZE[t] + return x + +def copy_tensors(fin, fout, part_id, n_parts): + while True: + + b = fin.read(4) + if not b: break + (n_dims,) = struct.unpack("i", b) + b = fin.read(4) + (length,) = struct.unpack("i", b) + b = fin.read(4) + (ftype,) = struct.unpack("i", b) + + assert n_dims in (1, 2) + + partshape = list(range(n_dims)) + for i in range(n_dims): + b = fin.read(4) + partshape[i] = struct.unpack("i", b)[0] + partshape = list(reversed(partshape)) + + name = fin.read(length) + data = fin.read(ggml_nbytes(partshape, ftype)) + + blck_size = GGML_BLCK_SIZE[WTYPES[ftype]] + type_size = GGML_TYPE_SIZE[WTYPES[ftype]] + + print(f"Processing tensor {name} with shape: {partshape} and type: {WTYPE_NAMES[ftype]}") + + # determine dimension along which multipart tensor is sharded + # + # split_dim 0 regex: + # - output.* + # - layers.*.attention.wq.weight + # - layers.*.attention.wk.weight + # - layers.*.attention.wv.weight + # - layers.*.feed_forward.w1.weight + # - layers.*.feed_forward.w3.weight + # + # split_dim 1 regex: + # - tok_embeddings.* + # - layers.*.attention.wo.weight + # - layers.*.feed_forward.w2.weight + # + if n_dims > 1: + split_dim = 1 + if b"tok_embeddings" in name: + split_dim = 1 + elif b"layers" in name: + if b"attention.wo.weight" in name: + split_dim = 1 + elif b"feed_forward.w2.weight" in name: + split_dim = 1 + else: + split_dim = 
0 + elif b"output" in name: + split_dim = 0 + + # output tensor header + fullshape = list(partshape) + if n_dims > 1: + fullshape[split_dim] *= n_parts + fout.write(struct.pack("iii", n_dims, len(name), ftype)) + for dim in reversed(fullshape): + fout.write(struct.pack("i", dim)) + fout.write(name) + + # ensure tensor data is aligned + tensor_data_offset = fout.tell() + while tensor_data_offset % QK != 0: + fout.write(struct.pack("B", 0)) + tensor_data_offset += 1 + + # output unified mappable tensor data + if n_dims == 1 or n_parts == 1: + # copy tensor which we thankfully received in one piece + if part_id == 0: + fout.write(data) + elif split_dim == 0: + # reassemble multifile tensor containing some of the rows + rows_per_chunk = partshape[0] + current_row = part_id * rows_per_chunk + bytes_per_row = fullshape[1] // blck_size * type_size + offset = current_row * bytes_per_row + fout.seek(tensor_data_offset + offset) + fout.write(data) + elif split_dim == 1: + # reassemble multifile tensor containing some of the cols + cols_per_chunk = partshape[1] + current_col = part_id * cols_per_chunk + bpr = partshape[1] // blck_size * type_size + bytes_per_row = fullshape[1] // blck_size * type_size + offset_current_col = current_col // blck_size * type_size + for row in range(partshape[0]): + offset_row = row * bytes_per_row + offset = offset_row + offset_current_col + fout.seek(tensor_data_offset + offset) + fout.write(data[row * bpr:row * bpr + bpr]) + + # advance file position to next tensor + fout.seek(tensor_data_offset + ggml_nbytes(fullshape, ftype)) + +def parse_args(): + parser = argparse.ArgumentParser(description='Migrate from GGML to new GGJT file format') + parser.add_argument('fin_path', help='your old ggml file (leave out the .1 .2 etc.)') + parser.add_argument('fout_path', help='your new ggjt file name') + return parser.parse_args() + +def main(): + args = parse_args() + assert args.fin_path + assert args.fout_path + assert args.fin_path != args.fout_path + + with open(args.fin_path, "rb") as fin: + hparams = read_hparams(fin) + tokens = read_tokens(fin, hparams) + + if hparams['magic'] == 0x67676a74: # ggjt + print("%s: input ggml has already been converted to 'ggjt' magic\n" % + (args.fin_path)) + sys.exit(1) + + if hparams['magic'] != 0x67676d66: # ggmf + print("%s: input ggml file doesn't have expected 'ggmf' magic: %#x\n" % + (args.fin_path, hparams['magic'])) + sys.exit(1) + + hparams['magic'] = 0x67676a74 # ggjt + + # count number of multipart files by convention + n_parts = 1 + while True: + if os.path.exists("%s.%d" % (args.fin_path, n_parts)): + n_parts += 1 + else: + break + + # we output a single file for ggml + with open(args.fout_path, "wb") as fout: + write_hparams(fout, hparams) + write_tokens(fout, tokens) + offset_of_tensors = fout.tell() + # the tensors we load could be split across multiple files + for part_id in range(n_parts): + fout.seek(offset_of_tensors) + print(f"Processing part {part_id+1} of {n_parts}\n") + fin_path = args.fin_path + if part_id > 0: + fin_path += ".%d" % (part_id) + with open(fin_path, "rb") as fin: + read_tokens(fin, read_hparams(fin)) + copy_tensors(fin, fout, part_id, n_parts) + + print(f"Done. 
Output file: {args.fout_path}\n") + +if __name__ == "__main__": + main() From 3df890aef432ce68143cfafcd7caf828bc4c3e55 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 30 Mar 2023 22:31:54 +0300 Subject: [PATCH 60/76] readme : update supported models --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e30452ee0..cefcfb7ca 100644 --- a/README.md +++ b/README.md @@ -37,9 +37,11 @@ Supported platforms: Supported models: -- [X] LLaMA +- [X] LLaMA 🦙 - [X] [Alpaca](https://github.com/ggerganov/llama.cpp#instruction-mode-with-alpaca) - [X] [GPT4All](https://github.com/ggerganov/llama.cpp#using-gpt4all) +- [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) +- [X] [Vigogne (French)](https://github.com/bofenghuang/vigogne) --- From 9733104be5389ebb1ff05095eca2a70280cd875a Mon Sep 17 00:00:00 2001 From: Pavol Rusnak Date: Fri, 31 Mar 2023 00:52:06 +0200 Subject: [PATCH 61/76] drop quantize.py (now that models are using a single file) --- README.md | 4 +- quantize.py | 131 ---------------------------------------------------- 2 files changed, 2 insertions(+), 133 deletions(-) delete mode 100644 quantize.py diff --git a/README.md b/README.md index cefcfb7ca..07066cd81 100644 --- a/README.md +++ b/README.md @@ -155,8 +155,8 @@ python3 -m pip install torch numpy sentencepiece # convert the 7B model to ggml FP16 format python3 convert-pth-to-ggml.py models/7B/ 1 -# quantize the model to 4-bits -python3 quantize.py 7B +# quantize the model to 4-bits (using method 2 = q4_0) +./quantize ./models/7B/ggml-model-f16.bin ./models/7B/ggml-model-q4_0.bin 2 # run the inference ./main -m ./models/7B/ggml-model-q4_0.bin -n 128 diff --git a/quantize.py b/quantize.py deleted file mode 100644 index 641df8dda..000000000 --- a/quantize.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python3 - -"""Script to execute the "quantize" script on a given set of models.""" - -import subprocess -import argparse -import glob -import sys -import os - - -def main(): - """Update the quantize binary name depending on the platform and parse - the command line arguments and execute the script. - """ - - if "linux" in sys.platform or "darwin" in sys.platform: - quantize_script_binary = "quantize" - - elif "win32" in sys.platform or "cygwin" in sys.platform: - quantize_script_binary = "quantize.exe" - - else: - print("WARNING: Unknown platform. Assuming a UNIX-like OS.\n") - quantize_script_binary = "quantize" - - parser = argparse.ArgumentParser( - prog='python3 quantize.py', - description='This script quantizes the given models by applying the ' - f'"{quantize_script_binary}" script on them.' - ) - parser.add_argument( - 'models', nargs='+', choices=('7B', '13B', '30B', '65B'), - help='The models to quantize.' - ) - parser.add_argument( - '-r', '--remove-16', action='store_true', dest='remove_f16', - help='Remove the f16 model after quantizing it.' - ) - parser.add_argument( - '-m', '--models-path', dest='models_path', - default=os.path.join(os.getcwd(), "models"), - help='Specify the directory where the models are located.' - ) - parser.add_argument( - '-q', '--quantize-script-path', dest='quantize_script_path', - default=os.path.join(os.getcwd(), quantize_script_binary), - help='Specify the path to the "quantize" script.' - ) - - # TODO: Revise this code - # parser.add_argument( - # '-t', '--threads', dest='threads', type='int', - # default=os.cpu_count(), - # help='Specify the number of threads to use to quantize many models at ' - # 'once. 
Defaults to os.cpu_count().' - # ) - - args = parser.parse_args() - args.models_path = os.path.abspath(args.models_path) - - if not os.path.isfile(args.quantize_script_path): - print( - f'The "{quantize_script_binary}" script was not found in the ' - "current location.\nIf you want to use it from another location, " - "set the --quantize-script-path argument from the command line." - ) - sys.exit(1) - - for model in args.models: - # The model is separated in various parts - # (ggml-model-f16.bin, ggml-model-f16.bin.0, ggml-model-f16.bin.1...) - f16_model_path_base = os.path.join( - args.models_path, model, "ggml-model-f16.bin" - ) - - if not os.path.isfile(f16_model_path_base): - print(f'The file %s was not found' % f16_model_path_base) - sys.exit(1) - - f16_model_parts_paths = map( - lambda filename: os.path.join(f16_model_path_base, filename), - glob.glob(f"{f16_model_path_base}*") - ) - - for f16_model_part_path in f16_model_parts_paths: - if not os.path.isfile(f16_model_part_path): - print( - f"The f16 model {os.path.basename(f16_model_part_path)} " - f"was not found in {args.models_path}{os.path.sep}{model}" - ". If you want to use it from another location, set the " - "--models-path argument from the command line." - ) - sys.exit(1) - - __run_quantize_script( - args.quantize_script_path, f16_model_part_path - ) - - if args.remove_f16: - os.remove(f16_model_part_path) - - -# This was extracted to a top-level function for parallelization, if -# implemented. See https://github.com/ggerganov/llama.cpp/pull/222/commits/f8db3d6cd91bf1a1342db9d29e3092bc12dd783c#r1140496406 - -def __run_quantize_script(script_path, f16_model_part_path): - """Run the quantize script specifying the path to it and the path to the - f16 model to quantize. - """ - - new_quantized_model_path = f16_model_part_path.replace("f16", "q4_0") - subprocess.run( - [script_path, f16_model_part_path, new_quantized_model_path, "2"], - check=True - ) - - -if __name__ == "__main__": - try: - main() - - except subprocess.CalledProcessError: - print("\nAn error ocurred while trying to quantize the models.") - sys.exit(1) - - except KeyboardInterrupt: - sys.exit(0) - - else: - print("\nSuccesfully quantized all models.") From cbef542879962fdc491656cd0c8cadd65a5f1356 Mon Sep 17 00:00:00 2001 From: Pavol Rusnak Date: Wed, 29 Mar 2023 21:31:24 +0200 Subject: [PATCH 62/76] py : cleanup the code - use f-strings where possible - drop first param of encode/decode functions since "utf-8" is the default --- convert-ggml-to-pth.py | 16 ++++++++-------- convert-gpt4all-to-ggml.py | 6 +++--- convert-gptq-to-ggml.py | 14 +++++++------- convert-pth-to-ggml.py | 6 +++--- convert-unversioned-ggml-to-ggml.py | 4 ++-- migrate-ggml-2023-03-30-pr613.py | 10 ++++------ 6 files changed, 27 insertions(+), 29 deletions(-) diff --git a/convert-ggml-to-pth.py b/convert-ggml-to-pth.py index 8ab17410d..7ddfe3a1b 100644 --- a/convert-ggml-to-pth.py +++ b/convert-ggml-to-pth.py @@ -27,9 +27,9 @@ def read_tokens(fin, vocab_size): text_len = struct.unpack("i", fin.read(4))[0] text_bytes = fin.read(text_len) try: - text = text_bytes.decode("utf-8") + text = text_bytes.decode() except UnicodeDecodeError: - text = text_bytes.decode("utf-8", "replace") + text = text_bytes.decode(errors="replace") score = struct.unpack("f", fin.read(4))[0] tokens.append((text, score)) return tokens @@ -82,7 +82,7 @@ def read_variables(fin): shape = tuple(struct.unpack("i" * n_dims, fin.read(4 * n_dims))) shape = shape[::-1] - name = fin.read(name_length).decode("utf-8") + name = 
fin.read(name_length).decode() # ensure tensor data is aligned tensor_data_offset = fin.tell() @@ -199,7 +199,7 @@ def chat(model, hparams, llama_dir): device = torch.device("cpu") llama = llama.to(device) - ctx = """You are AI. + ctx = """You are AI. This is a dialog, where User interacts with AI. AI is helpful, kind, obedient, honest, respectful, direct, concise, should try to protect User's privacy, and knows its own limits. Also, AI must answer User and AI cannot stop the conversation by itself. User: Hello, AI. AI: Hello! How can I assist you today? @@ -207,11 +207,11 @@ AI: Hello! How can I assist you today? print(ctx.rstrip("\n")) while True: print("-" * 60) - prompt = input(f"User: ") + prompt = input("User: ") if ctx != "": - ctx = ctx + "User: " + prompt + "\n" + ctx = f"{ctx}User: {prompt}\n" else: - ctx = prompt + "\nAI:" + ctx = f"{prompt}\nAI:" ctx = (ctx[-1920:]) if len(ctx) >= 2048 else ctx @@ -236,7 +236,7 @@ AI: Hello! How can I assist you today? ) s = generation_output.sequences[0] decoded = tokenizer.decode(s) - ctx = decoded + "\n" + ctx = f"{decoded}\n" def main(): diff --git a/convert-gpt4all-to-ggml.py b/convert-gpt4all-to-ggml.py index f1d9d7aef..b1a5e0560 100644 --- a/convert-gpt4all-to-ggml.py +++ b/convert-gpt4all-to-ggml.py @@ -49,7 +49,7 @@ def write_header(f_out, header): def write_tokens(fout, tokenizer): for i in range(tokenizer.vocab_size()): if tokenizer.is_unknown(i): - text = " \u2047 ".encode("utf-8") + text = " \u2047 ".encode() elif tokenizer.is_control(i): text = b"" elif tokenizer.is_byte(i): @@ -60,13 +60,13 @@ def write_tokens(fout, tokenizer): byte_value = int(piece[3:-1], 16) text = struct.pack("B", byte_value) else: - text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode() fout.write(struct.pack("i", len(text))) fout.write(text) fout.write(struct.pack("f", tokenizer.get_score(i))) # TODO: GPT4All - add extra token - text = "".encode("utf-8") + text = "".encode() fout.write(struct.pack("i", len(text))) fout.write(text) fout.write(struct.pack("f", 0.0)) diff --git a/convert-gptq-to-ggml.py b/convert-gptq-to-ggml.py index 860eb148b..42e99c2ff 100644 --- a/convert-gptq-to-ggml.py +++ b/convert-gptq-to-ggml.py @@ -50,7 +50,7 @@ fout.write(struct.pack("i", 4)) # This loop unchanged from convert-pth-to-ggml.py: for i in range(tokenizer.vocab_size()): if tokenizer.is_unknown(i): - text = " \u2047 ".encode("utf-8") + text = " \u2047 ".encode() elif tokenizer.is_control(i): text = b"" elif tokenizer.is_byte(i): @@ -61,13 +61,13 @@ for i in range(tokenizer.vocab_size()): byte_value = int(piece[3:-1], 16) text = struct.pack("B", byte_value) else: - text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode() fout.write(struct.pack("i", len(text))) fout.write(text) fout.write(struct.pack("f", tokenizer.get_score(i))) def write_header(shape, dst_name, ftype_cur): - sname = dst_name.encode('utf-8') + sname = dst_name.encode() fout.write(struct.pack("iii", len(shape), len(sname), ftype_cur)) fout.write(struct.pack("i" * len(shape), *shape[::-1])) fout.write(sname) @@ -80,7 +80,7 @@ def write_header(shape, dst_name, ftype_cur): def convert_non_q4(src_name, dst_name): v = model[src_name] shape = v.shape - print("Processing non-Q4 variable: " + src_name + " with shape: ", shape, " and type: ", v.dtype) + print(f"Processing non-Q4 variable: {src_name} with shape: {shape} and type: {v.dtype}") if len(shape) == 
1: print(" Converting to float32") v = v.to(torch.float32) @@ -105,7 +105,7 @@ def convert_q4(src_name, dst_name, permute=False): # Each int32 item is actually 8 int4 items packed together, and it's transposed. shape = (qweight.shape[0], qweight.shape[1] * 8) - print("Processing Q4 variable: " + src_name + " with shape: ", shape) + print(f"Processing Q4 variable: {src_name} with shape: {shape}") # The output format has the int4 weights in groups of 32 rather than 8. # It looks like this: @@ -168,5 +168,5 @@ for i in range(n_layer): fout.close() -print("Done. Output file: " + fname_out) -print("") +print(f"Done. Output file: {fname_out}") +print() diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py index df42e76bd..dcef2f6a3 100644 --- a/convert-pth-to-ggml.py +++ b/convert-pth-to-ggml.py @@ -120,7 +120,7 @@ def write_header(fout, hparams, ftype): def write_tokens(fout, tokenizer): for i in range(tokenizer.vocab_size()): if tokenizer.is_unknown(i): - text = " \u2047 ".encode("utf-8") + text = " \u2047 ".encode() elif tokenizer.is_control(i): text = b"" elif tokenizer.is_byte(i): @@ -131,7 +131,7 @@ def write_tokens(fout, tokenizer): byte_value = int(piece[3:-1], 16) text = struct.pack("B", byte_value) else: - text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode() fout.write(struct.pack("i", len(text))) fout.write(text) fout.write(struct.pack("f", tokenizer.get_score(i))) @@ -191,7 +191,7 @@ def process_and_write_variables(fout, model, ftype, part_id, n_parts): fullshape = list(partshape) if n_dims > 1: fullshape[split_dim] *= n_parts - sname = name.encode('utf-8') + sname = name.encode() fout.write(struct.pack("iii", n_dims, len(sname), ftype_cur)) for dim in reversed(fullshape): fout.write(struct.pack("i", dim)) diff --git a/convert-unversioned-ggml-to-ggml.py b/convert-unversioned-ggml-to-ggml.py index 33b6243bd..5151d9081 100644 --- a/convert-unversioned-ggml-to-ggml.py +++ b/convert-unversioned-ggml-to-ggml.py @@ -44,7 +44,7 @@ def write_header(f_out, header): def write_tokens(fout, tokenizer): for i in range(tokenizer.vocab_size()): if tokenizer.is_unknown(i): - text = " \u2047 ".encode("utf-8") + text = " \u2047 ".encode() elif tokenizer.is_control(i): text = b"" elif tokenizer.is_byte(i): @@ -55,7 +55,7 @@ def write_tokens(fout, tokenizer): byte_value = int(piece[3:-1], 16) text = struct.pack("B", byte_value) else: - text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode() fout.write(struct.pack("i", len(text))) fout.write(text) fout.write(struct.pack("f", tokenizer.get_score(i))) diff --git a/migrate-ggml-2023-03-30-pr613.py b/migrate-ggml-2023-03-30-pr613.py index 5596f6c55..b6ef2476e 100644 --- a/migrate-ggml-2023-03-30-pr613.py +++ b/migrate-ggml-2023-03-30-pr613.py @@ -272,13 +272,11 @@ def main(): tokens = read_tokens(fin, hparams) if hparams['magic'] == 0x67676a74: # ggjt - print("%s: input ggml has already been converted to 'ggjt' magic\n" % - (args.fin_path)) + print(f"{args.fin_path}: input ggml has already been converted to 'ggjt' magic\n") sys.exit(1) if hparams['magic'] != 0x67676d66: # ggmf - print("%s: input ggml file doesn't have expected 'ggmf' magic: %#x\n" % - (args.fin_path, hparams['magic'])) + print(f"{args.fin_path}: input ggml file doesn't have expected 'ggmf' magic: {hparams['magic']:#x}\n") sys.exit(1) hparams['magic'] = 0x67676a74 # ggjt @@ -286,7 +284,7 @@ def main(): # count number of 
multipart files by convention n_parts = 1 while True: - if os.path.exists("%s.%d" % (args.fin_path, n_parts)): + if os.path.exists(f"{args.fin_path}.{n_parts}"): n_parts += 1 else: break @@ -302,7 +300,7 @@ def main(): print(f"Processing part {part_id+1} of {n_parts}\n") fin_path = args.fin_path if part_id > 0: - fin_path += ".%d" % (part_id) + fin_path += f".{part_id}" with open(fin_path, "rb") as fin: read_tokens(fin, read_hparams(fin)) copy_tensors(fin, fout, part_id, n_parts) From 02c5b27e91a6d18cf1043d3a2d8dbc59610ac257 Mon Sep 17 00:00:00 2001 From: perserk Date: Fri, 31 Mar 2023 16:55:44 +0500 Subject: [PATCH 63/76] Add AVX acceleration (#617) * ggml : add AVX quantize_row_q4_0() * ggml : add AVX ggml_vec_dot_q4_0() * ggml : refactor AVX part of ggml_vec_dot_q4_0() https://github.com/ggerganov/llama.cpp/pull/617#issuecomment-1489985645 --- ggml.c | 153 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/ggml.c b/ggml.c index 25fa72632..ffd54ec41 100644 --- a/ggml.c +++ b/ggml.c @@ -461,6 +461,39 @@ static inline __m128i packNibbles( __m256i bytes ) __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); return _mm_packus_epi16( r0, r1 ); } +#elif __AVX__ +static inline __m128i bytesFromNibbles( const uint8_t* rsi ) +{ + // Load 8 bytes from memory + __m128i tmp = _mm_loadu_si64( ( const __m128i* )rsi ); + + // Expand bytes into uint16_t values + __m128i bytes = _mm_cvtepu8_epi16( tmp ); + + // Unpack values into individual bytes + const __m128i lowMask = _mm_set1_epi8( 0xF ); + __m128i high = _mm_andnot_si128( lowMask, bytes ); + __m128i low = _mm_and_si128( lowMask, bytes ); + high = _mm_slli_epi16( high, 4 ); + bytes = _mm_or_si128( low, high ); + return bytes; +} + +static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) +{ + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh + const __m128i lowByte = _mm_set1_epi16( 0xFF ); + __m128i high = _mm_andnot_si128( lowByte, bytes1 ); + __m128i low = _mm_and_si128( lowByte, bytes1 ); + high = _mm_srli_epi16( high, 4 ); + bytes1 = _mm_or_si128( low, high ); + high = _mm_andnot_si128( lowByte, bytes2 ); + low = _mm_and_si128( lowByte, bytes2 ); + high = _mm_srli_epi16( high, 4 ); + bytes2 = _mm_or_si128( low, high ); + + return _mm_packus_epi16( bytes1, bytes2); +} #endif // method 5 @@ -660,6 +693,80 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int __m128i res = packNibbles( i0 ); _mm_storeu_si128( ( __m128i* )y[i].qs, res ); } +#elif defined(__AVX__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + // Quantize these floats + const float d = maxScalar / 7.0f; + y[i].d = d; + const float id = ( maxScalar != 0.0f ) ? 
7.0f / maxScalar : 0.0f; + const __m256 mul = _mm256_set1_ps( id ); + + // Apply the multiplier + v0 = _mm256_mul_ps( v0, mul ); + v1 = _mm256_mul_ps( v1, mul ); + v2 = _mm256_mul_ps( v2, mul ); + v3 = _mm256_mul_ps( v3, mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + + // Since we don't have in AVX some necessary functions, + // we split the registers in half and call AVX2 analogs from SSE + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = _mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + + // Apply offset to translate the range from [ -7 .. +7 ] into [ +1 .. +15 ] + const __m128i off = _mm_set1_epi8( 8); + ni0 = _mm_add_epi8( ni0, off ); + ni4 = _mm_add_epi8( ni4, off ); + + // Compress the vector into 4 bit/value, and store + __m128i res = packNibbles( ni0, ni4 ); + _mm_storeu_si128( ( __m128i* )y[i].qs, res ); + } #elif defined(__wasm_simd128__) for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max @@ -1892,6 +1999,52 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest res = _mm_add_ps( res, _mm_movehl_ps( res, res ) ); res = _mm_add_ss( res, _mm_movehdup_ps( res ) ); + sumf = _mm_cvtss_f32( res ); +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (int i = 0; i < nb; ++i) { + // Compute combined scale for the block + const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) ); + + __m128i i32[2]; + for (int j = 0; j < 2; ++j) { + // Load 8 bytes, and unpack 4 bit fields into bytes, making 16 bytes + __m128i bx = bytesFromNibbles( x[i].qs + 8*j ); + __m128i by = bytesFromNibbles( y[i].qs + 8*j ); + + // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
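+            // _mm_maddubs_epi16 used below multiplies unsigned bytes by signed bytes,
+            // so the code feeds it |x| and a copy of y that carries x's sign (via _mm_sign_epi8);
+            // element-wise the products are identical to x*y.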
+ const __m128i off = _mm_set1_epi8( 8 ); + bx = _mm_sub_epi8( bx, off ); + by = _mm_sub_epi8( by, off ); + + // Get absolute values of x vectors + const __m128i ax = _mm_sign_epi8(bx, bx); + + // Sign the values of the y vectors + const __m128i sy = _mm_sign_epi8(by, bx); + + // Perform multiplication and create 16-bit values + const __m128i dot = _mm_maddubs_epi16(ax, sy); + + const __m128i ones = _mm_set1_epi16(1); + i32[j] = _mm_madd_epi16(ones, dot); + } + + // Convert int32_t to float + __m256 p = _mm256_cvtepi32_ps( _mm256_set_m128i( i32[0], i32[1] )); + // Apply the scale, and accumulate + acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc); + } + + // Return horizontal sum of the acc vector + __m128 res = _mm256_extractf128_ps( acc, 1 ); + res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) ); + res = _mm_add_ps( res, _mm_movehl_ps( res, res ) ); + res = _mm_add_ss( res, _mm_movehdup_ps( res ) ); + sumf = _mm_cvtss_f32( res ); #elif defined(__wasm_simd128__) // wasm simd From 1d08882afa647c44195f4f6495a68ea455650cae Mon Sep 17 00:00:00 2001 From: slaren <2141330+slaren@users.noreply.github.com> Date: Fri, 31 Mar 2023 17:55:52 +0200 Subject: [PATCH 64/76] Optimize AVX2 ggml_vec_dot_q4_0 (#642) --- ggml.c | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/ggml.c b/ggml.c index ffd54ec41..8e051dd2e 100644 --- a/ggml.c +++ b/ggml.c @@ -1833,7 +1833,7 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest const block_q4_0 * restrict x = vx; const block_q4_0 * restrict y = vy; - ggml_float sumf = 0.0; + float sumf = 0.0; #if defined(__ARM_NEON) float sum0 = 0.0f; @@ -1928,7 +1928,7 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest #endif } - sumf = (ggml_float)(sum0 + sum1); + sumf = sum0 + sum1; #elif defined(__AVX512F__) // Initialize accumulator with zeros __m512 acc0 = _mm512_setzero_ps(); @@ -1962,6 +1962,10 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest __m256 acc = _mm256_setzero_ps(); // Main loop + // TODO: figure a way to do this in a portable way + #ifdef __GNUC__ + #pragma GCC unroll 16 + #endif for (int i = 0; i < nb; ++i) { // Compute combined scale for the block const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) ); @@ -1975,20 +1979,21 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest bx = _mm256_sub_epi8( bx, off ); by = _mm256_sub_epi8( by, off ); - // Sign-extend first 16 signed bytes into int16_t - __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) ); - __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) ); - // Compute products of int16_t integers, add pairwise - __m256i i32 = _mm256_madd_epi16( x16, y16 ); + // Get absolute values of x vectors + const __m256i ax = _mm256_sign_epi8(bx, bx); - // Sign-extend last 16 signed bytes into int16_t vectors - x16 = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) ); - y16 = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) ); - // Accumulate products of int16_t integers - i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16, y16 ) ); + // Sign the values of the y vectors + const __m256i sy = _mm256_sign_epi8(by, bx); + + // Perform multiplication and create 16-bit values + const __m256i dot = _mm256_maddubs_epi16(ax, sy); + + const __m256i ones = _mm256_set1_epi16(1); + const __m256i i32 = _mm256_madd_epi16(ones, dot); // Convert int32_t to float - __m256 p = 
_mm256_cvtepi32_ps( i32 ); + const __m256 p = _mm256_cvtepi32_ps( i32 ); + // Apply the scale, and accumulate acc = _mm256_fmadd_ps( d, p, acc ); } From 3525899277d2e2bdc8ec3f0e6e40c47251608700 Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Fri, 31 Mar 2023 19:19:16 +0000 Subject: [PATCH 65/76] Enable -std= for cmake builds, fix warnings (#598) --- CMakeLists.txt | 2 ++ ggml.c | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 37f22700b..1a434f07b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -68,7 +68,9 @@ option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE}) # Compile flags # +set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD_REQUIRED true) +set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED true) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) diff --git a/ggml.c b/ggml.c index 8e051dd2e..b6dd3f3cf 100644 --- a/ggml.c +++ b/ggml.c @@ -542,8 +542,8 @@ static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * r const uint8_t vi0 = (int8_t)roundf(v0) + 8; const uint8_t vi1 = (int8_t)roundf(v1) + 8; - assert(vi0 >= 0 && vi0 < 16); - assert(vi1 >= 0 && vi1 < 16); + assert(vi0 < 16); + assert(vi1 < 16); pp[l/2] = vi0 | (vi1 << 4); } @@ -837,8 +837,8 @@ static void quantize_row_q4_1_reference(const float * restrict x, void * restric const uint8_t vi0 = roundf(v0); const uint8_t vi1 = roundf(v1); - assert(vi0 >= 0 && vi0 < 16); - assert(vi1 >= 0 && vi1 < 16); + assert(vi0 < 16); + assert(vi1 < 16); pp[l/2] = vi0 | (vi1 << 4); } From 0d054e292e5492981867be69c788edd04dc8adeb Mon Sep 17 00:00:00 2001 From: Slaren <2141330+slaren@users.noreply.github.com> Date: Fri, 31 Mar 2023 20:03:48 +0200 Subject: [PATCH 66/76] Show error message when -f fails --- examples/common.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/examples/common.cpp b/examples/common.cpp index af3ad9eb7..50c536503 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -66,6 +66,11 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { break; } std::ifstream file(argv[i]); + if (!file) { + fprintf(stderr, "error: failed to open file '%s'\n", argv[i]); + invalid_param = true; + break; + } std::copy(std::istreambuf_iterator(file), std::istreambuf_iterator(), back_inserter(params.prompt)); if (params.prompt.back() == '\n') { params.prompt.pop_back(); From d0a7f742e76bb48c0bd852f0b3bf09ec0b75b200 Mon Sep 17 00:00:00 2001 From: rimoliga <53384203+rimoliga@users.noreply.github.com> Date: Sat, 1 Apr 2023 11:57:30 -0300 Subject: [PATCH 67/76] readme: replace termux links with homepage, play store is deprecated (#680) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 07066cd81..f5744eacc 100644 --- a/README.md +++ b/README.md @@ -301,7 +301,7 @@ And after 4.45 hours, you will have the final perplexity. ### Android -You can easily run `llama.cpp` on Android device with [termux](https://play.google.com/store/apps/details?id=com.termux). +You can easily run `llama.cpp` on Android device with [termux](https://termux.dev/). First, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake: ``` $ mkdir build-android @@ -310,7 +310,7 @@ $ export NDK= $ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod .. 
$ make ``` -Install [termux](https://play.google.com/store/apps/details?id=com.termux) on your device and run `termux-setup-storage` to get access to your SD card. +Install [termux](https://termux.dev/) on your device and run `termux-setup-storage` to get access to your SD card. Finally, copy the `llama` binary and the model files to your device storage. Here is a demo of an interactive session running on Pixel 5 phone: https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4 From a717cba8440b380f43cd3e2510862fc1ea3de9a2 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Sun, 2 Apr 2023 01:38:18 +0900 Subject: [PATCH 68/76] py: huggingface -> Hugging Face (#686) --- convert-ggml-to-pth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert-ggml-to-pth.py b/convert-ggml-to-pth.py index 7ddfe3a1b..25a44237a 100644 --- a/convert-ggml-to-pth.py +++ b/convert-ggml-to-pth.py @@ -254,7 +254,7 @@ def main(): parser.add_argument( "--hf", action="store_true", - help="Whether to save the model in the huggingface format. (default: False)", + help="Whether to save the model in the Hugging Face format. (default: False)", ) parser.add_argument( "--chat", "-c", action="store_true", help="Whether to open a chat with the model. (default: False)" From 5b70e7de4c0b8186669d0c5609ba61a2d46de562 Mon Sep 17 00:00:00 2001 From: Murilo Santana Date: Sat, 1 Apr 2023 23:41:12 -0300 Subject: [PATCH 69/76] fix default params for examples/main (#697) --- examples/common.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/examples/common.cpp b/examples/common.cpp index 50c536503..5400f6b01 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -39,6 +39,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { bool invalid_param = false; std::string arg; + gpt_params default_params; + for (int i = 1; i < argc; i++) { arg = argv[i]; @@ -173,7 +175,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { } params.n_parts = std::stoi(argv[i]); } else if (arg == "-h" || arg == "--help") { - gpt_print_usage(argc, argv, params); + gpt_print_usage(argc, argv, default_params); exit(0); } else if (arg == "--random-prompt") { params.random_prompt = true; @@ -185,13 +187,13 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { params.input_prefix = argv[i]; } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); - gpt_print_usage(argc, argv, params); + gpt_print_usage(argc, argv, default_params); exit(1); } } if (invalid_param) { fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str()); - gpt_print_usage(argc, argv, params); + gpt_print_usage(argc, argv, default_params); exit(1); } From c4f89d8d73aab4318a6c61e3835135adfcf55407 Mon Sep 17 00:00:00 2001 From: Fabian Date: Sun, 2 Apr 2023 09:17:05 +0200 Subject: [PATCH 70/76] make : use -march=native -mtune=native on x86 (#609) --- Makefile | 91 ++------------------------------------------------------ 1 file changed, 2 insertions(+), 89 deletions(-) diff --git a/Makefile b/Makefile index 83a4514ef..2f828bf10 100644 --- a/Makefile +++ b/Makefile @@ -70,95 +70,8 @@ endif # TODO: probably these flags need to be tweaked on some architectures # feel free to update the Makefile for your architecture and send a pull request or issue ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686)) - ifeq ($(UNAME_S),Darwin) - F16C_M := $(shell sysctl machdep.cpu.features) - ifneq (,$(findstring F16C,$(F16C_M))) - 
CFLAGS += -mf16c - endif - AVX1_M := $(shell sysctl machdep.cpu.features) - ifneq (,$(findstring FMA,$(AVX1_M))) - CFLAGS += -mfma - endif - ifneq (,$(findstring AVX1.0,$(AVX1_M))) - CFLAGS += -mavx - endif - AVX2_M := $(shell sysctl machdep.cpu.leaf7_features) - ifneq (,$(findstring AVX2,$(AVX2_M))) - CFLAGS += -mavx2 - endif - else ifeq ($(UNAME_S),Linux) - AVX1_M := $(shell grep "avx " /proc/cpuinfo) - ifneq (,$(findstring avx,$(AVX1_M))) - CFLAGS += -mavx - endif - AVX2_M := $(shell grep "avx2 " /proc/cpuinfo) - ifneq (,$(findstring avx2,$(AVX2_M))) - CFLAGS += -mavx2 - endif - FMA_M := $(shell grep "fma " /proc/cpuinfo) - ifneq (,$(findstring fma,$(FMA_M))) - CFLAGS += -mfma - endif - F16C_M := $(shell grep "f16c " /proc/cpuinfo) - ifneq (,$(findstring f16c,$(F16C_M))) - CFLAGS += -mf16c - endif - SSE3_M := $(shell grep "sse3 " /proc/cpuinfo) - ifneq (,$(findstring sse3,$(SSE3_M))) - CFLAGS += -msse3 - endif - AVX512F_M := $(shell grep "avx512f " /proc/cpuinfo) - ifneq (,$(findstring avx512f,$(AVX512F_M))) - CFLAGS += -mavx512f - endif - AVX512BW_M := $(shell grep "avx512bw " /proc/cpuinfo) - ifneq (,$(findstring avx512bw,$(AVX512BW_M))) - CFLAGS += -mavx512bw - endif - AVX512DQ_M := $(shell grep "avx512dq " /proc/cpuinfo) - ifneq (,$(findstring avx512dq,$(AVX512DQ_M))) - CFLAGS += -mavx512dq - endif - AVX512VL_M := $(shell grep "avx512vl " /proc/cpuinfo) - ifneq (,$(findstring avx512vl,$(AVX512VL_M))) - CFLAGS += -mavx512vl - endif - AVX512CD_M := $(shell grep "avx512cd " /proc/cpuinfo) - ifneq (,$(findstring avx512cd,$(AVX512CD_M))) - CFLAGS += -mavx512cd - endif - AVX512ER_M := $(shell grep "avx512er " /proc/cpuinfo) - ifneq (,$(findstring avx512er,$(AVX512ER_M))) - CFLAGS += -mavx512er - endif - AVX512IFMA_M := $(shell grep "avx512ifma " /proc/cpuinfo) - ifneq (,$(findstring avx512ifma,$(AVX512IFMA_M))) - CFLAGS += -mavx512ifma - endif - AVX512PF_M := $(shell grep "avx512pf " /proc/cpuinfo) - ifneq (,$(findstring avx512pf,$(AVX512PF_M))) - CFLAGS += -mavx512pf - endif - else ifeq ($(UNAME_S),Haiku) - AVX1_M := $(shell sysinfo -cpu | grep -w "AVX") - ifneq (,$(findstring AVX,$(AVX1_M))) - CFLAGS += -mavx - endif - AVX2_M := $(shell sysinfo -cpu | grep -w "AVX2") - ifneq (,$(findstring AVX2,$(AVX2_M))) - CFLAGS += -mavx2 - endif - FMA_M := $(shell sysinfo -cpu | grep -w "FMA") - ifneq (,$(findstring FMA,$(FMA_M))) - CFLAGS += -mfma - endif - F16C_M := $(shell sysinfo -cpu | grep -w "F16C") - ifneq (,$(findstring F16C,$(F16C_M))) - CFLAGS += -mf16c - endif - else - CFLAGS += -mfma -mf16c -mavx -mavx2 - endif + # Use all CPU extensions that are available: + CFLAGS += -march=native -mtune=native endif ifneq ($(filter ppc64%,$(UNAME_M)),) POWER9_M := $(shell grep "POWER9" /proc/cpuinfo) From 81040f10aae3160317c5787c9c59acb219927826 Mon Sep 17 00:00:00 2001 From: Stephan Walter Date: Sun, 2 Apr 2023 07:18:53 +0000 Subject: [PATCH 71/76] llama : do not allocate KV cache for "vocab_only == true" (#682) Fixes sanitizer CI --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index bed24207d..1b3157cd2 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1608,7 +1608,7 @@ struct llama_context * llama_init_from_file( } // reserve memory for context buffers - { + if (!params.vocab_only) { if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) { fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__); llama_free(ctx); From 6e7801d08d81c931a5427bae46f00763e993f54a Mon Sep 17 00:00:00 2001 
From: Leonardo Neumann Date: Sun, 2 Apr 2023 04:56:20 -0300 Subject: [PATCH 72/76] examples : add gpt4all script (#658) --- examples/gpt4all.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100755 examples/gpt4all.sh diff --git a/examples/gpt4all.sh b/examples/gpt4all.sh new file mode 100755 index 000000000..d974f95a9 --- /dev/null +++ b/examples/gpt4all.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# +# Temporary script - will be removed in the future +# + +cd `dirname $0` +cd .. + +./main --color --instruct --threads 4 \ + --model ./models/gpt4all-7B/gpt4all-lora-quantized.bin \ + --file ./prompts/alpaca.txt \ + --batch_size 8 --ctx_size 2048 \ + --repeat_last_n 64 --repeat_penalty 1.3 \ + --n_predict 128 --temp 0.1 --top_k 40 --top_p 0.95 From c0bb1d3ce21005ab21d686626ba87261a6e3a660 Mon Sep 17 00:00:00 2001 From: Marian Cepok Date: Sun, 2 Apr 2023 12:21:31 +0200 Subject: [PATCH 73/76] ggml : change ne to int64_t (#626) --- ggml.c | 691 +++++++++++++++++++++++++++--------------------------- ggml.h | 52 ++-- llama.cpp | 6 +- 3 files changed, 375 insertions(+), 374 deletions(-) diff --git a/ggml.c b/ggml.c index b6dd3f3cf..63aa5eb6e 100644 --- a/ggml.c +++ b/ggml.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -2774,7 +2775,7 @@ void ggml_print_objects(const struct ggml_context * ctx) { GGML_PRINT("%s: --- end ---\n", __func__); } -int ggml_nelements(const struct ggml_tensor * tensor) { +int64_t ggml_nelements(const struct ggml_tensor * tensor) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; @@ -3090,7 +3091,7 @@ struct ggml_tensor * ggml_new_tensor_impl( struct ggml_context * ctx, enum ggml_type type, int n_dims, - const int* ne, + const int64_t* ne, void* data) { // always insert objects at the end of the context's memory pool struct ggml_object * obj_cur = ctx->objects_end; @@ -3210,44 +3211,44 @@ struct ggml_tensor * ggml_new_tensor( struct ggml_context * ctx, enum ggml_type type, int n_dims, - const int * ne) { + const int64_t * ne) { return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL); } struct ggml_tensor * ggml_new_tensor_1d( struct ggml_context * ctx, enum ggml_type type, - int ne0) { + int64_t ne0) { return ggml_new_tensor(ctx, type, 1, &ne0); } struct ggml_tensor * ggml_new_tensor_2d( struct ggml_context * ctx, enum ggml_type type, - int ne0, - int ne1) { - const int ne[2] = { ne0, ne1 }; + int64_t ne0, + int64_t ne1) { + const int64_t ne[2] = { ne0, ne1 }; return ggml_new_tensor(ctx, type, 2, ne); } struct ggml_tensor * ggml_new_tensor_3d( struct ggml_context * ctx, enum ggml_type type, - int ne0, - int ne1, - int ne2) { - const int ne[3] = { ne0, ne1, ne2 }; + int64_t ne0, + int64_t ne1, + int64_t ne2) { + const int64_t ne[3] = { ne0, ne1, ne2 }; return ggml_new_tensor(ctx, type, 3, ne); } struct ggml_tensor * ggml_new_tensor_4d( struct ggml_context * ctx, enum ggml_type type, - int ne0, - int ne1, - int ne2, - int ne3) { - const int ne[4] = { ne0, ne1, ne2, ne3 }; + int64_t ne0, + int64_t ne1, + int64_t ne2, + int64_t ne3) { + const int64_t ne[4] = { ne0, ne1, ne2, ne3 }; return ggml_new_tensor(ctx, type, 4, ne); } @@ -3894,7 +3895,7 @@ struct ggml_tensor * ggml_mean( is_node = true; } - int ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; + int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne); result->op = GGML_OP_MEAN; @@ -4255,7 
+4256,7 @@ struct ggml_tensor * ggml_mul_mat( is_node = true; } - const int ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] }; + const int64_t ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] }; struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne); result->op = GGML_OP_MUL_MAT; @@ -4380,8 +4381,8 @@ struct ggml_tensor * ggml_reshape( struct ggml_tensor * ggml_reshape_2d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, - int ne1) { + int64_t ne0, + int64_t ne1) { GGML_ASSERT(ggml_is_contiguous(a)); GGML_ASSERT(ggml_nelements(a) == ne0*ne1); @@ -4392,7 +4393,7 @@ struct ggml_tensor * ggml_reshape_2d( is_node = true; } - const int ne[2] = { ne0, ne1 }; + const int64_t ne[2] = { ne0, ne1 }; struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data); result->op = GGML_OP_RESHAPE; @@ -4406,9 +4407,9 @@ struct ggml_tensor * ggml_reshape_2d( struct ggml_tensor * ggml_reshape_3d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, - int ne1, - int ne2) { + int64_t ne0, + int64_t ne1, + int64_t ne2) { GGML_ASSERT(ggml_is_contiguous(a)); GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2); @@ -4419,7 +4420,7 @@ struct ggml_tensor * ggml_reshape_3d( is_node = true; } - const int ne[3] = { ne0, ne1, ne2 }; + const int64_t ne[3] = { ne0, ne1, ne2 }; struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data); result->op = GGML_OP_RESHAPE; @@ -4435,7 +4436,7 @@ struct ggml_tensor * ggml_reshape_3d( struct ggml_tensor * ggml_view_1d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, + int64_t ne0, size_t offset) { if (a->grad) { GGML_ASSERT(false); // gradient propagation is not supported @@ -4456,15 +4457,15 @@ struct ggml_tensor * ggml_view_1d( struct ggml_tensor * ggml_view_2d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, - int ne1, + int64_t ne0, + int64_t ne1, size_t nb1, size_t offset) { if (a->grad) { GGML_ASSERT(false); // gradient propagation is not supported } - const int ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 }; + const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 }; struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset); @@ -4695,7 +4696,7 @@ struct ggml_tensor * ggml_conv_1d_1s( is_node = true; } - const int ne[4] = { b->ne[0], a->ne[2], 1, 1, }; + const int64_t ne[4] = { b->ne[0], a->ne[2], 1, 1, }; struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); result->op = GGML_OP_CONV_1D_1S; @@ -4722,7 +4723,7 @@ struct ggml_tensor * ggml_conv_1d_2s( is_node = true; } - const int ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, }; + const int64_t ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, }; struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); result->op = GGML_OP_CONV_1D_2S; @@ -4822,10 +4823,10 @@ static void ggml_compute_forward_dup_f16( return; } - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; const size_t nb00 = src0->nb[0]; const size_t nb01 = src0->nb[1]; @@ -4842,9 +4843,9 @@ static void ggml_compute_forward_dup_f16( size_t id = 0; const size_t rs = ne00*nb00; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + 
for (int64_t i01 = 0; i01 < ne01; i01++) { const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; char * dst_ptr = (char *) dst->data + id*rs; @@ -4858,10 +4859,10 @@ static void ggml_compute_forward_dup_f16( size_t id = 0; float * dst_ptr = (float *) dst->data; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); @@ -4880,10 +4881,10 @@ static void ggml_compute_forward_dup_f16( size_t id = 0; float * dst_ptr = (float *) dst->data; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); @@ -4896,10 +4897,10 @@ static void ggml_compute_forward_dup_f16( size_t id = 0; ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = *src0_ptr; @@ -4926,10 +4927,10 @@ static void ggml_compute_forward_dup_f32( return; } - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; const size_t nb00 = src0->nb[0]; const size_t nb01 = src0->nb[1]; @@ -4946,9 +4947,9 @@ static void ggml_compute_forward_dup_f32( size_t id = 0; const size_t rs = ne00*nb00; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; char * dst_ptr = (char *) dst->data + id*rs; @@ -4962,10 +4963,10 @@ static void ggml_compute_forward_dup_f32( size_t id = 0; ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); @@ 
-4984,10 +4985,10 @@ static void ggml_compute_forward_dup_f32( size_t id = 0; float * dst_ptr = (float *) dst->data; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = *src0_ptr; @@ -5000,10 +5001,10 @@ static void ggml_compute_forward_dup_f32( size_t id = 0; ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); @@ -5389,18 +5390,18 @@ static void ggml_compute_forward_sum_f32( assert(ggml_is_scalar(dst)); assert(src0->nb[0] == sizeof(float)); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; const size_t nb01 = src0->nb[1]; const size_t nb02 = src0->nb[2]; const size_t nb03 = src0->nb[3]; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { ggml_vec_sum_f32(ne00, (float *) (dst->data), (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); @@ -5445,19 +5446,19 @@ static void ggml_compute_forward_mean_f32( assert(src0->nb[0] == sizeof(float)); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; const size_t nb01 = src0->nb[1]; const size_t nb02 = src0->nb[2]; const size_t nb03 = src0->nb[3]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - const int ne2 = dst->ne[2]; - const int ne3 = dst->ne[3]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + const int64_t ne2 = dst->ne[2]; + const int64_t ne3 = dst->ne[3]; assert(ne0 == 1); assert(ne1 == ne01); @@ -5473,9 +5474,9 @@ static void ggml_compute_forward_mean_f32( const size_t nb2 = dst->nb[2]; const size_t nb3 = dst->nb[3]; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); @@ -5962,10 +5963,10 @@ static void ggml_compute_forward_norm_f32( const int ith = params->ith; const int nth = params->nth; - const int ne00 = src0->ne[0]; - 
const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; const size_t nb01 = src0->nb[1]; const size_t nb02 = src0->nb[2]; @@ -5978,13 +5979,13 @@ static void ggml_compute_forward_norm_f32( const float eps = 1e-5f; // TODO: make this a parameter // TODO: optimize - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = ith; i01 < ne01; i01 += nth) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); ggml_float sum = 0.0; - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { sum += (ggml_float)x[i00]; } @@ -5993,7 +5994,7 @@ static void ggml_compute_forward_norm_f32( float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); ggml_float sum2 = 0.0; - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { float v = x[i00] - mean; y[i00] = v; sum2 += (ggml_float)(v*v); @@ -6045,10 +6046,10 @@ static void ggml_compute_forward_rms_norm_f32( const int ith = params->ith; const int nth = params->nth; - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; const size_t nb01 = src0->nb[1]; const size_t nb02 = src0->nb[2]; @@ -6061,13 +6062,13 @@ static void ggml_compute_forward_rms_norm_f32( const float eps = 1e-6f; // TODO: make this a parameter // TODO: optimize - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = ith; i01 < ne01; i01 += nth) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); ggml_float sum = 0.0; - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { sum += (ggml_float)(x[i00] * x[i00]); } @@ -6120,13 +6121,13 @@ static bool ggml_compute_forward_mul_mat_use_blas( const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { - //const int ne00 = src0->ne[0]; - //const int ne01 = src0->ne[1]; + //const int64_t ne00 = src0->ne[0]; + //const int64_t ne01 = src0->ne[1]; - const int ne10 = src1->ne[0]; + const int64_t ne10 = src1->ne[0]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; // TODO: find the optimal values for these if (ggml_is_contiguous(src0) && @@ -6148,23 +6149,23 @@ static void ggml_compute_forward_mul_mat_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - const int ne10 = src1->ne[0]; + const int64_t ne10 = src1->ne[0]; #endif - const int ne11 = src1->ne[1]; + const int64_t ne11 = src1->ne[1]; #ifndef 
NDEBUG - const int ne12 = src1->ne[2]; - const int ne13 = src1->ne[3]; + const int64_t ne12 = src1->ne[2]; + const int64_t ne13 = src1->ne[3]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - const int ne2 = dst->ne[2]; - const int ne3 = dst->ne[3]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + const int64_t ne2 = dst->ne[2]; + const int64_t ne3 = dst->ne[3]; const int nb00 = src0->nb[0]; #endif @@ -6224,8 +6225,8 @@ static void ggml_compute_forward_mul_mat_f32( return; } - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13); @@ -6272,7 +6273,7 @@ static void ggml_compute_forward_mul_mat_f32( const int i02 = (ir - i03*ne02*ne01)/ne01; const int i01 = (ir - i03*ne02*ne01 - i02*ne01); - for (int ic = 0; ic < ne11; ++ic) { + for (int64_t ic = 0; ic < ne11; ++ic) { // src1 indices const int i13 = i03; const int i12 = i02; @@ -6313,21 +6314,21 @@ static void ggml_compute_forward_mul_mat_f16_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; - const int ne10 = src1->ne[0]; - const int ne11 = src1->ne[1]; - const int ne12 = src1->ne[2]; - const int ne13 = src1->ne[3]; + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + const int64_t ne12 = src1->ne[2]; + const int64_t ne13 = src1->ne[3]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - const int ne2 = dst->ne[2]; - const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + const int64_t ne2 = dst->ne[2]; + const int64_t ne3 = dst->ne[3]; + //const int64_t ne = ne0*ne1*ne2*ne3; const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -6387,12 +6388,12 @@ static void ggml_compute_forward_mul_mat_f16_f32( float * const wdata = params->wdata; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { { size_t id = 0; - for (int i01 = 0; i01 < ne01; ++i01) { - for (int i00 = 0; i00 < ne00; ++i00) { + for (int64_t i01 = 0; i01 < ne01; ++i01) { + for (int64_t i00 = 0; i00 < ne00; ++i00) { wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00)); } } @@ -6422,10 +6423,10 @@ static void ggml_compute_forward_mul_mat_f16_f32( ggml_fp16_t * const wdata = params->wdata; size_t id = 0; - for (int i13 = 0; i13 < ne13; ++i13) { - for (int i12 = 0; i12 < ne12; ++i12) { - for (int i11 = 0; i11 < ne11; ++i11) { - for (int i10 = 0; i10 < ne10; ++i10) { + for (int64_t i13 = 0; i13 < ne13; ++i13) { + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = 0; i11 < ne11; ++i11) { + for (int64_t i10 = 0; i10 < ne10; ++i10) { wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); } } @@ -6477,7 +6478,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); - for (int ic = 0; ic < 
ne11; ++ic) { + for (int64_t ic = 0; ic < ne11; ++ic) { ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00); } } @@ -6526,20 +6527,20 @@ static void ggml_compute_forward_mul_mat_q_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t ne03 = src0->ne[3]; - const int ne10 = src1->ne[0]; - const int ne11 = src1->ne[1]; - const int ne12 = src1->ne[2]; - const int ne13 = src1->ne[3]; + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + const int64_t ne12 = src1->ne[2]; + const int64_t ne13 = src1->ne[3]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - const int ne2 = dst->ne[2]; - const int ne3 = dst->ne[3]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + const int64_t ne2 = dst->ne[2]; + const int64_t ne3 = dst->ne[3]; const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -6603,11 +6604,11 @@ static void ggml_compute_forward_mul_mat_q_f32( float * const wdata = params->wdata; dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q; - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { { size_t id = 0; - for (int i01 = 0; i01 < ne01; ++i01) { + for (int64_t i01 = 0; i01 < ne01; ++i01) { dequantize_row_q((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00); id += ne00; } @@ -6637,9 +6638,9 @@ static void ggml_compute_forward_mul_mat_q_f32( char * wdata = params->wdata; const size_t row_size = ne10*GGML_TYPE_SIZE[type]/GGML_BLCK_SIZE[type]; - for (int i13 = 0; i13 < ne13; ++i13) { - for (int i12 = 0; i12 < ne12; ++i12) { - for (int i11 = 0; i11 < ne11; ++i11) { + for (int64_t i13 = 0; i13 < ne13; ++i13) { + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = 0; i11 < ne11; ++i11) { quantize_row_q((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10); wdata += row_size; } @@ -6688,7 +6689,7 @@ static void ggml_compute_forward_mul_mat_q_f32( assert(ne00 % 32 == 0); - for (int ic = 0; ic < ne11; ++ic) { + for (int64_t ic = 0; ic < ne11; ++ic) { vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size)); } } @@ -7181,10 +7182,10 @@ static void ggml_compute_forward_rope_f32( const int n_dims = ((int32_t *) src1->data)[1]; const int mode = ((int32_t *) src1->data)[2]; - //const int ne0 = src0->ne[0]; - const int ne1 = src0->ne[1]; - const int ne2 = src0->ne[2]; - const int ne3 = src0->ne[3]; + //const int64_t ne0 = src0->ne[0]; + const int64_t ne1 = src0->ne[1]; + const int64_t ne2 = src0->ne[2]; + const int64_t ne3 = src0->ne[3]; const int nb0 = src0->nb[0]; const int nb1 = src0->nb[1]; @@ -7197,10 +7198,10 @@ static void ggml_compute_forward_rope_f32( assert(nb0 == sizeof(float)); // TODO: optimize - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + for (int64_t i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) { const int p = (mode == 0 ? 
n_past + i2 : i2); - for (int i1 = 0; i1 < ne1; i1++) { + for (int64_t i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < n_dims; i0 += 2) { const float theta = powf(10000.0, ((float)-i0)/n_dims); @@ -7238,10 +7239,10 @@ static void ggml_compute_forward_rope_f16( const int n_dims = ((int32_t *) src1->data)[1]; const int mode = ((int32_t *) src1->data)[2]; - //const int ne0 = src0->ne[0]; - const int ne1 = src0->ne[1]; - const int ne2 = src0->ne[2]; - const int ne3 = src0->ne[3]; + //const int64_t ne0 = src0->ne[0]; + const int64_t ne1 = src0->ne[1]; + const int64_t ne2 = src0->ne[2]; + const int64_t ne3 = src0->ne[3]; const int nb0 = src0->nb[0]; const int nb1 = src0->nb[1]; @@ -7253,10 +7254,10 @@ static void ggml_compute_forward_rope_f16( assert(nb0 == sizeof(ggml_fp16_t)); - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + for (int64_t i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) { const int p = (mode == 0 ? n_past + i2 : i2); - for (int i1 = 0; i1 < ne1; i1++) { + for (int64_t i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < n_dims; i0 += 2) { const float theta = powf(10000.0, ((float)-i0)/n_dims); @@ -7317,21 +7318,21 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - //const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + //const int64_t ne03 = src0->ne[3]; - const int ne10 = src1->ne[0]; - const int ne11 = src1->ne[1]; - //const int ne12 = src1->ne[2]; - //const int ne13 = src1->ne[3]; + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + //const int64_t ne12 = src1->ne[2]; + //const int64_t ne13 = src1->ne[3]; - //const int ne0 = dst->ne[0]; - //const int ne1 = dst->ne[1]; - //const int ne2 = dst->ne[2]; - //const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; + //const int64_t ne0 = dst->ne[0]; + //const int64_t ne1 = dst->ne[1]; + //const int64_t ne2 = dst->ne[2]; + //const int64_t ne3 = dst->ne[3]; + //const int64_t ne = ne0*ne1*ne2*ne3; const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -7368,11 +7369,11 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); ggml_fp16_t * dst_data = wdata + i02*ew0*ne00; - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i00*ew0 + i01] = src[i00]; } } @@ -7383,10 +7384,10 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00; - for (int i11 = 0; i11 < ne11; i11++) { + for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); ggml_fp16_t * dst_data = wdata; - for (int i10 = 0; i10 < ne10; i10++) { + for (int64_t i10 = 0; i10 < ne10; i10++) { dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]); } } @@ -7411,7 +7412,7 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( for (int i1 = ir0; i1 < ir1; i1++) { float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int i0 = 
0; i0 < ne10; ++i0) { + for (int64_t i0 = 0; i0 < ne10; ++i0) { dst_data[i0] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; @@ -7437,21 +7438,21 @@ static void ggml_compute_forward_conv_1d_1s_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - //const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + //const int64_t ne03 = src0->ne[3]; - const int ne10 = src1->ne[0]; - const int ne11 = src1->ne[1]; - //const int ne12 = src1->ne[2]; - //const int ne13 = src1->ne[3]; + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + //const int64_t ne12 = src1->ne[2]; + //const int64_t ne13 = src1->ne[3]; - //const int ne0 = dst->ne[0]; - //const int ne1 = dst->ne[1]; - //const int ne2 = dst->ne[2]; - //const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; + //const int64_t ne0 = dst->ne[0]; + //const int64_t ne1 = dst->ne[1]; + //const int64_t ne2 = dst->ne[2]; + //const int64_t ne3 = dst->ne[3]; + //const int64_t ne = ne0*ne1*ne2*ne3; const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -7488,11 +7489,11 @@ static void ggml_compute_forward_conv_1d_1s_f32( { float * const wdata = (float *) params->wdata + 0; - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01); float * dst_data = wdata + i02*ew0*ne00; - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i00*ew0 + i01] = src[i00]; } } @@ -7503,10 +7504,10 @@ static void ggml_compute_forward_conv_1d_1s_f32( { float * const wdata = (float *) params->wdata + ne02*ew0*ne00; - for (int i11 = 0; i11 < ne11; i11++) { + for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); float * dst_data = wdata; - for (int i10 = 0; i10 < ne10; i10++) { + for (int64_t i10 = 0; i10 < ne10; i10++) { dst_data[(i10 + nh)*ew0 + i11] = src[i10]; } } @@ -7531,7 +7532,7 @@ static void ggml_compute_forward_conv_1d_1s_f32( for (int i1 = ir0; i1 < ir1; i1++) { float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int i0 = 0; i0 < ne10; ++i0) { + for (int64_t i0 = 0; i0 < ne10; ++i0) { dst_data[i0] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; @@ -7585,21 +7586,21 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - //const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + //const int64_t ne03 = src0->ne[3]; - const int ne10 = src1->ne[0]; - const int ne11 = src1->ne[1]; - //const int ne12 = src1->ne[2]; - //const int ne13 = src1->ne[3]; + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + //const int64_t ne12 = src1->ne[2]; + //const int64_t ne13 = src1->ne[3]; - //const int ne0 = dst->ne[0]; - //const int ne1 = dst->ne[1]; - //const int ne2 = dst->ne[2]; - //const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; + //const int64_t ne0 = dst->ne[0]; + //const int64_t ne1 = dst->ne[1]; + //const int64_t ne2 = dst->ne[2]; + //const int64_t ne3 = dst->ne[3]; + //const int64_t ne = ne0*ne1*ne2*ne3; 
const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -7636,11 +7637,11 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); ggml_fp16_t * dst_data = wdata + i02*ew0*ne00; - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i00*ew0 + i01] = src[i00]; } } @@ -7651,10 +7652,10 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00; - for (int i11 = 0; i11 < ne11; i11++) { + for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); ggml_fp16_t * dst_data = wdata; - for (int i10 = 0; i10 < ne10; i10++) { + for (int64_t i10 = 0; i10 < ne10; i10++) { dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]); } } @@ -7679,7 +7680,7 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( for (int i1 = ir0; i1 < ir1; i1++) { float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int i0 = 0; i0 < ne10; i0 += 2) { + for (int64_t i0 = 0; i0 < ne10; i0 += 2) { dst_data[i0/2] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; @@ -7705,21 +7706,21 @@ static void ggml_compute_forward_conv_1d_2s_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int ne00 = src0->ne[0]; - const int ne01 = src0->ne[1]; - const int ne02 = src0->ne[2]; - //const int ne03 = src0->ne[3]; + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + //const int64_t ne03 = src0->ne[3]; - const int ne10 = src1->ne[0]; - const int ne11 = src1->ne[1]; - //const int ne12 = src1->ne[2]; - //const int ne13 = src1->ne[3]; + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + //const int64_t ne12 = src1->ne[2]; + //const int64_t ne13 = src1->ne[3]; - //const int ne0 = dst->ne[0]; - //const int ne1 = dst->ne[1]; - //const int ne2 = dst->ne[2]; - //const int ne3 = dst->ne[3]; - //const int ne = ne0*ne1*ne2*ne3; + //const int64_t ne0 = dst->ne[0]; + //const int64_t ne1 = dst->ne[1]; + //const int64_t ne2 = dst->ne[2]; + //const int64_t ne3 = dst->ne[3]; + //const int64_t ne = ne0*ne1*ne2*ne3; const int nb00 = src0->nb[0]; const int nb01 = src0->nb[1]; @@ -7756,11 +7757,11 @@ static void ggml_compute_forward_conv_1d_2s_f32( { float * const wdata = (float *) params->wdata + 0; - for (int i02 = 0; i02 < ne02; i02++) { - for (int i01 = 0; i01 < ne01; i01++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01); float * dst_data = wdata + i02*ew0*ne00; - for (int i00 = 0; i00 < ne00; i00++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i00*ew0 + i01] = src[i00]; } } @@ -7771,10 +7772,10 @@ static void ggml_compute_forward_conv_1d_2s_f32( { float * const wdata = (float *) params->wdata + ne02*ew0*ne00; - for (int i11 = 0; i11 < ne11; i11++) { + for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); float * dst_data = wdata; - for (int i10 = 0; i10 < ne10; i10++) { + for (int64_t i10 = 0; i10 < ne10; i10++) { dst_data[(i10 + nh)*ew0 + i11] = src[i10]; } } @@ -7799,7 
+7800,7 @@ static void ggml_compute_forward_conv_1d_2s_f32( for (int i1 = ir0; i1 < ir1; i1++) { float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int i0 = 0; i0 < ne10; i0 += 2) { + for (int64_t i0 = 0; i0 < ne10; i0 += 2) { dst_data[i0/2] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; @@ -7851,25 +7852,25 @@ static void ggml_compute_forward_flash_attn_f32( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int neq0 = q->ne[0]; - const int neq1 = q->ne[1]; - const int neq2 = q->ne[2]; - const int neq3 = q->ne[3]; + const int64_t neq0 = q->ne[0]; + const int64_t neq1 = q->ne[1]; + const int64_t neq2 = q->ne[2]; + const int64_t neq3 = q->ne[3]; - const int nek0 = k->ne[0]; - const int nek1 = k->ne[1]; - //const int nek2 = k->ne[2]; - //const int nek3 = k->ne[3]; + const int64_t nek0 = k->ne[0]; + const int64_t nek1 = k->ne[1]; + //const int64_t nek2 = k->ne[2]; + //const int64_t nek3 = k->ne[3]; - //const int nev0 = v->ne[0]; - const int nev1 = v->ne[1]; - //const int nev2 = v->ne[2]; - //const int nev3 = v->ne[3]; + //const int64_t nev0 = v->ne[0]; + const int64_t nev1 = v->ne[1]; + //const int64_t nev2 = v->ne[2]; + //const int64_t nev3 = v->ne[3]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - //const int ne2 = dst->ne[2]; - //const int ne3 = dst->ne[3]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + //const int64_t ne2 = dst->ne[2]; + //const int64_t ne3 = dst->ne[3]; const int nbk0 = k->nb[0]; const int nbk1 = k->nb[1]; @@ -7894,10 +7895,10 @@ static void ggml_compute_forward_flash_attn_f32( const int ith = params->ith; const int nth = params->nth; - const int D = neq0; - const int N = neq1; - const int P = nek1 - N; - const int M = P + N; + const int64_t D = neq0; + const int64_t N = neq1; + const int64_t P = nek1 - N; + const int64_t M = P + N; const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); @@ -7959,7 +7960,7 @@ static void ggml_compute_forward_flash_attn_f32( S[i] = -INFINITY; } - for (int ic = 0; ic < nek1; ++ic) { + for (int64_t ic = 0; ic < nek1; ++ic) { // k indices const int ik3 = iq3; const int ik2 = iq2; @@ -7978,7 +7979,7 @@ static void ggml_compute_forward_flash_attn_f32( ggml_vec_scale_f32(nek1, S, scale); if (masked) { - for (int i = P; i < M; i++) { + for (int64_t i = P; i < M; i++) { if (i > P + iq1) { S[i] = -INFINITY; } @@ -8036,7 +8037,7 @@ static void ggml_compute_forward_flash_attn_f32( #endif } - for (int ic = 0; ic < nev1; ++ic) { + for (int64_t ic = 0; ic < nev1; ++ic) { // dst indices const int i1 = iq1; const int i2 = iq2; @@ -8060,25 +8061,25 @@ static void ggml_compute_forward_flash_attn_f16( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int neq0 = q->ne[0]; - const int neq1 = q->ne[1]; - const int neq2 = q->ne[2]; - const int neq3 = q->ne[3]; + const int64_t neq0 = q->ne[0]; + const int64_t neq1 = q->ne[1]; + const int64_t neq2 = q->ne[2]; + const int64_t neq3 = q->ne[3]; - const int nek0 = k->ne[0]; - const int nek1 = k->ne[1]; - //const int nek2 = k->ne[2]; - //const int nek3 = k->ne[3]; + const int64_t nek0 = k->ne[0]; + const int64_t nek1 = k->ne[1]; + //const int64_t nek2 = k->ne[2]; + //const int64_t nek3 = k->ne[3]; - //const int nev0 = v->ne[0]; - const int nev1 = v->ne[1]; - //const int nev2 = v->ne[2]; - //const int nev3 = v->ne[3]; + //const int64_t nev0 = v->ne[0]; + const int64_t nev1 = v->ne[1]; + //const int64_t nev2 = v->ne[2]; + //const int64_t nev3 = v->ne[3]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - //const int ne2 = dst->ne[2]; - //const int 
ne3 = dst->ne[3]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + //const int64_t ne2 = dst->ne[2]; + //const int64_t ne3 = dst->ne[3]; const int nbk0 = k->nb[0]; const int nbk1 = k->nb[1]; @@ -8103,10 +8104,10 @@ static void ggml_compute_forward_flash_attn_f16( const int ith = params->ith; const int nth = params->nth; - const int D = neq0; - const int N = neq1; - const int P = nek1 - N; - const int M = P + N; + const int64_t D = neq0; + const int64_t N = neq1; + const int64_t P = nek1 - N; + const int64_t M = P + N; const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); @@ -8169,7 +8170,7 @@ static void ggml_compute_forward_flash_attn_f16( } if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) { - for (int ic = 0; ic < nek1; ++ic) { + for (int64_t ic = 0; ic < nek1; ++ic) { // k indices const int ik3 = iq3; const int ik2 = iq2; @@ -8184,7 +8185,7 @@ static void ggml_compute_forward_flash_attn_f16( (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } } else { - for (int ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) { + for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) { // k indices const int ik3 = iq3; const int ik2 = iq2; @@ -8204,7 +8205,7 @@ static void ggml_compute_forward_flash_attn_f16( ggml_vec_scale_f32(nek1, S, scale); if (masked) { - for (int i = P; i < M; i++) { + for (int64_t i = P; i < M; i++) { if (i > P + iq1) { S[i] = -INFINITY; } @@ -8264,12 +8265,12 @@ static void ggml_compute_forward_flash_attn_f16( ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup); - for (int i = 0; i < M; i++) { + for (int64_t i = 0; i < M; i++) { S16[i] = GGML_FP32_TO_FP16(S[i]); } if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) { - for (int ic = 0; ic < nev1; ++ic) { + for (int64_t ic = 0; ic < nev1; ++ic) { // dst indices const int i1 = iq1; const int i2 = iq2; @@ -8281,7 +8282,7 @@ static void ggml_compute_forward_flash_attn_f16( S16); } } else { - for (int ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) { + for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) { // dst indices const int i1 = iq1; const int i2 = iq2; @@ -8337,35 +8338,35 @@ static void ggml_compute_forward_flash_ff_f16( int64_t t0 = ggml_perf_time_us(); UNUSED(t0); - const int nea0 = a->ne[0]; - const int nea1 = a->ne[1]; - const int nea2 = a->ne[2]; - const int nea3 = a->ne[3]; + const int64_t nea0 = a->ne[0]; + const int64_t nea1 = a->ne[1]; + const int64_t nea2 = a->ne[2]; + const int64_t nea3 = a->ne[3]; - const int neb00 = b0->ne[0]; - const int neb01 = b0->ne[1]; - //const int neb02 = b0->ne[2]; - //const int neb03 = b0->ne[3]; + const int64_t neb00 = b0->ne[0]; + const int64_t neb01 = b0->ne[1]; + //const int64_t neb02 = b0->ne[2]; + //const int64_t neb03 = b0->ne[3]; - const int neb10 = b1->ne[0]; - const int neb11 = b1->ne[1]; - //const int neb12 = b1->ne[2]; - //const int neb13 = b1->ne[3]; + const int64_t neb10 = b1->ne[0]; + const int64_t neb11 = b1->ne[1]; + //const int64_t neb12 = b1->ne[2]; + //const int64_t neb13 = b1->ne[3]; - const int nec00 = c0->ne[0]; - const int nec01 = c0->ne[1]; - //const int nec02 = c0->ne[2]; - //const int nec03 = c0->ne[3]; + const int64_t nec00 = c0->ne[0]; + const int64_t nec01 = c0->ne[1]; + //const int64_t nec02 = c0->ne[2]; + //const int64_t nec03 = c0->ne[3]; - const int nec10 = c1->ne[0]; - const int nec11 = c1->ne[1]; - //const int nec12 = c1->ne[2]; - //const int nec13 = c1->ne[3]; + const int64_t nec10 = c1->ne[0]; + const int64_t nec11 = 
c1->ne[1]; + //const int64_t nec12 = c1->ne[2]; + //const int64_t nec13 = c1->ne[3]; - const int ne0 = dst->ne[0]; - const int ne1 = dst->ne[1]; - const int ne2 = dst->ne[2]; - //const int ne3 = dst->ne[3]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + const int64_t ne2 = dst->ne[2]; + //const int64_t ne3 = dst->ne[3]; const int nba0 = a->nb[0]; const int nba1 = a->nb[1]; @@ -8400,9 +8401,9 @@ static void ggml_compute_forward_flash_ff_f16( const int ith = params->ith; const int nth = params->nth; - const int D = nea0; - //const int N = nea1; - const int M = neb01; + const int64_t D = nea0; + //const int64_t N = nea1; + const int64_t M = neb01; GGML_ASSERT(ne0 == nea0); GGML_ASSERT(ne1 == nea1); @@ -8458,7 +8459,7 @@ static void ggml_compute_forward_flash_ff_f16( float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32); - for (int ic = 0; ic < neb01; ++ic) { + for (int64_t ic = 0; ic < neb01; ++ic) { // b0 indices const int ib03 = ia3; const int ib02 = ia2; @@ -8478,7 +8479,7 @@ static void ggml_compute_forward_flash_ff_f16( ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M); - for (int i = 0; i < M; i++) { + for (int64_t i = 0; i < M; i++) { S16[i] = GGML_FP32_TO_FP16(S[i]); } @@ -8490,7 +8491,7 @@ static void ggml_compute_forward_flash_ff_f16( const int i2 = ia2; const int i3 = ia3; - for (int ic = 0; ic < nec01; ++ic) { + for (int64_t ic = 0; ic < nec01; ++ic) { ggml_vec_dot_f16(neb01, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), @@ -9393,7 +9394,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) size_t cur = 0; - const int ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL); + const int64_t ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL); if (node->src1->type == GGML_TYPE_F32) { cur = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1) @@ -9652,7 +9653,7 @@ void ggml_graph_print(const struct ggml_cgraph * cgraph) { perf_total_per_op_us[node->op] += node->perf_time_us; - GGML_PRINT(" - %3d: [ %6d, %6d, %6d] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", + GGML_PRINT(" - %3d: [ %" PRId64 ", %" PRId64 ", %" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", i, node->ne[0], node->ne[1], node->ne[2], GGML_OP_LABEL[node->op], node->is_param ? "x" : node->grad ? 
"g" : " ", node->perf_runs, @@ -9666,7 +9667,7 @@ void ggml_graph_print(const struct ggml_cgraph * cgraph) { for (int i = 0; i < cgraph->n_leafs; i++) { struct ggml_tensor * node = cgraph->leafs[i]; - GGML_PRINT(" - %3d: [ %6d, %6d] %8s\n", + GGML_PRINT(" - %3d: [ %" PRId64 ", %" PRId64 "] %8s\n", i, node->ne[0], node->ne[1], GGML_OP_LABEL[node->op]); @@ -9737,7 +9738,7 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph fprintf(fp, " \"%p\" [ \ style = filled; fillcolor = %s; shape = record; \ -label=\"%d [%d, %d] | %s", +label=\"%d [%" PRId64 ", %" PRId64 "] | %s", (void *) node, color, i, node->ne[0], node->ne[1], GGML_OP_SYMBOL[node->op]); @@ -9762,7 +9763,7 @@ label=\"%.1e\"; ]\n", } else { fprintf(fp, " \"%p\" [ \ style = filled; fillcolor = %s; shape = record; \ -label=\"CONST %d [%d, %d]\"; ]\n", +label=\"CONST %d [%" PRId64 ", %" PRId64 "]\"; ]\n", (void *) node, color, i, node->ne[0], node->ne[1]); } @@ -9826,9 +9827,9 @@ label=\"CONST %d [%d, %d]\"; ]\n", static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) { int i = 0; for (int p = 0; p < np; ++p) { - const int ne = ggml_nelements(ps[p]) ; + const int64_t ne = ggml_nelements(ps[p]) ; // TODO: add function to set tensor from array - for (int j = 0; j < ne; ++j) { + for (int64_t j = 0; j < ne; ++j) { ggml_set_f32_1d(ps[p], j, x[i++]); } } @@ -9837,9 +9838,9 @@ static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const f static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) { int i = 0; for (int p = 0; p < np; ++p) { - const int ne = ggml_nelements(ps[p]) ; + const int64_t ne = ggml_nelements(ps[p]) ; // TODO: add function to get all elements at once - for (int j = 0; j < ne; ++j) { + for (int64_t j = 0; j < ne; ++j) { x[i++] = ggml_get_f32_1d(ps[p], j); } } @@ -9848,9 +9849,9 @@ static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) { int i = 0; for (int p = 0; p < np; ++p) { - const int ne = ggml_nelements(ps[p]) ; + const int64_t ne = ggml_nelements(ps[p]) ; // TODO: add function to get all elements at once - for (int j = 0; j < ne; ++j) { + for (int64_t j = 0; j < ne; ++j) { g[i++] = ggml_get_f32_1d(ps[p]->grad, j); } } diff --git a/ggml.h b/ggml.h index f7791ed11..ad962b109 100644 --- a/ggml.h +++ b/ggml.h @@ -258,11 +258,11 @@ struct ggml_tensor { enum ggml_type type; int n_dims; - int ne[GGML_MAX_DIMS]; // number of elements - size_t nb[GGML_MAX_DIMS]; // stride in bytes: - // nb[0] = sizeof(type) - // nb[1] = nb[0] * ne[0] + padding - // nb[i] = nb[i-1] * ne[i-1] + int64_t ne[GGML_MAX_DIMS]; // number of elements + size_t nb[GGML_MAX_DIMS]; // stride in bytes: + // nb[0] = sizeof(type) + // nb[1] = nb[0] * ne[0] + padding + // nb[i] = nb[i-1] * ne[i-1] // compute data enum ggml_op op; @@ -328,8 +328,8 @@ int64_t ggml_cycles_per_ms(void); void ggml_print_object (const struct ggml_object * obj); void ggml_print_objects(const struct ggml_context * ctx); -int ggml_nelements(const struct ggml_tensor * tensor); -size_t ggml_nbytes (const struct ggml_tensor * tensor); +int64_t ggml_nelements(const struct ggml_tensor * tensor); +size_t ggml_nbytes (const struct ggml_tensor * tensor); int ggml_blck_size (enum ggml_type type); size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block @@ -355,33 +355,33 @@ struct ggml_tensor * ggml_new_tensor( struct ggml_context * ctx, enum 
ggml_type type, int n_dims, - const int *ne); + const int64_t *ne); struct ggml_tensor * ggml_new_tensor_1d( struct ggml_context * ctx, enum ggml_type type, - int ne0); + int64_t ne0); struct ggml_tensor * ggml_new_tensor_2d( struct ggml_context * ctx, enum ggml_type type, - int ne0, - int ne1); + int64_t ne0, + int64_t ne1); struct ggml_tensor * ggml_new_tensor_3d( struct ggml_context * ctx, enum ggml_type type, - int ne0, - int ne1, - int ne2); + int64_t ne0, + int64_t ne1, + int64_t ne2); struct ggml_tensor * ggml_new_tensor_4d( struct ggml_context * ctx, enum ggml_type type, - int ne0, - int ne1, - int ne2, - int ne3); + int64_t ne0, + int64_t ne1, + int64_t ne2, + int64_t ne3); struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value); struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); @@ -531,30 +531,30 @@ struct ggml_tensor * ggml_reshape( struct ggml_tensor * ggml_reshape_2d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, - int ne1); + int64_t ne0, + int64_t ne1); // return view(a) // TODO: when we start computing gradient, make a copy instead of view struct ggml_tensor * ggml_reshape_3d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, - int ne1, - int ne2); + int64_t ne0, + int64_t ne1, + int64_t ne2); // offset in bytes struct ggml_tensor * ggml_view_1d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, + int64_t ne0, size_t offset); struct ggml_tensor * ggml_view_2d( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, - int ne1, + int64_t ne0, + int64_t ne1, size_t nb1, // row stride in bytes size_t offset); diff --git a/llama.cpp b/llama.cpp index 1b3157cd2..b0f53ca62 100644 --- a/llama.cpp +++ b/llama.cpp @@ -256,8 +256,8 @@ static bool kv_cache_init( const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; - const int n_mem = n_layer*n_ctx; - const int n_elements = n_embd*n_mem; + const int64_t n_mem = (int64_t)n_layer*n_ctx; + const int64_t n_elements = n_embd*n_mem; cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB); @@ -679,7 +679,7 @@ static bool llama_model_load( return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", + fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%" PRId64 ", %" PRId64 "], expected [%d, %d]\n", __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); return false; } From e986f94829bae0b9e66b326acbbba179931c84f1 Mon Sep 17 00:00:00 2001 From: Christian Falch <875252+chrfalch@users.noreply.github.com> Date: Sun, 2 Apr 2023 12:23:04 +0200 Subject: [PATCH 74/76] Added api for getting/setting the kv_cache (#685) The api provides access methods for retrieving the current memory buffer for the kv_cache and its token number. It also contains a method for setting the kv_cache from a memory buffer. This makes it possible to load/save history - maybe support --cache-prompt paramater as well? Co-authored-by: Pavol Rusnak --- llama.cpp | 27 +++++++++++++++++++++++++++ llama.h | 17 +++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/llama.cpp b/llama.cpp index b0f53ca62..878907185 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1668,6 +1668,33 @@ int llama_model_quantize( return 0; } +// Returns the KV cache that will contain the context for the +// ongoing prediction with the model. 
+const uint8_t * llama_get_kv_cache(struct llama_context * ctx) { + return ctx->model.kv_self.buf.data(); +} + +// Returns the size of the KV cache +size_t llama_get_kv_cache_size(struct llama_context * ctx) { + return ctx->model.kv_self.buf.size(); +} + +int llama_get_kv_cache_token_count(struct llama_context * ctx) { + return ctx->model.kv_self.n; +} + +// Sets the KV cache containing the current context for the model +void llama_set_kv_cache( + struct llama_context * ctx, + const uint8_t * kv_cache, + size_t n_size, + int n_token_count) { + // Make sure we have the same kv cache setup + LLAMA_ASSERT(ctx->model.kv_self.buf.size() == n_size); + memcpy(ctx->model.kv_self.buf.data(), kv_cache, n_size); + ctx->model.kv_self.n = n_token_count; +} + int llama_eval( struct llama_context * ctx, const llama_token * tokens, diff --git a/llama.h b/llama.h index 258de5a94..04e2bf71c 100644 --- a/llama.h +++ b/llama.h @@ -83,6 +83,23 @@ extern "C" { const char * fname_out, int itype); + // Returns the KV cache that will contain the context for the + // ongoing prediction with the model. + LLAMA_API const uint8_t * llama_get_kv_cache(struct llama_context * ctx); + + // Returns the size of the KV cache + LLAMA_API size_t llama_get_kv_cache_size(struct llama_context * ctx); + + // Returns the number of tokens in the KV cache + LLAMA_API int llama_get_kv_cache_token_count(struct llama_context * ctx); + + // Sets the KV cache containing the current context for the model + LLAMA_API void llama_set_kv_cache( + struct llama_context * ctx, + const uint8_t * kv_cache, + size_t n_size, + int n_token_count); + // Run the llama inference to obtain the logits and probabilities for the next token. // tokens + n_tokens is the provided batch of new tokens to process // n_past is the number of tokens to use from previous eval calls From d8d4e865cd481b18f10508ffee35db903767ef5c Mon Sep 17 00:00:00 2001 From: Thatcher Chamberlin Date: Sun, 2 Apr 2023 06:48:57 -0400 Subject: [PATCH 75/76] Add a missing step to the gpt4all instructions (#690) `migrate-ggml-2023-03-30-pr613.py` is needed to get gpt4all running. --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f5744eacc..508d315d5 100644 --- a/README.md +++ b/README.md @@ -232,13 +232,15 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach. - Obtain the `gpt4all-lora-quantized.bin` model - It is distributed in the old `ggml` format which is now obsoleted -- You have to convert it to the new format using [./convert-gpt4all-to-ggml.py](./convert-gpt4all-to-ggml.py): +- You have to convert it to the new format using [./convert-gpt4all-to-ggml.py](./convert-gpt4all-to-ggml.py). 
You may also need to +convert the model from the old format to the new format with [./migrate-ggml-2023-03-30-pr613.py](./migrate-ggml-2023-03-30-pr613.py): ```bash python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model + python3 migrate-ggml-2023-03-30-pr613.py models/gpt4all-7B/gpt4all-lora-quantized.bin models/gpt4all-7B/gpt4all-lora-quantized-new.bin ``` -- You can now use the newly generated `gpt4all-lora-quantized.bin` model in exactly the same way as all other models +- You can now use the newly generated `gpt4all-lora-quantized-new.bin` model in exactly the same way as all other models - The original model is saved in the same folder with a suffix `.orig` ### Obtaining and verifying the Facebook LLaMA original model and Stanford Alpaca model data From a0c05164168297c04737936ad0cad849a512547a Mon Sep 17 00:00:00 2001 From: bsilvereagle Date: Sun, 2 Apr 2023 15:13:03 -0700 Subject: [PATCH 76/76] Remove torch GPU dependencies from the Docker.full image (#665) By using `pip install torch --index-url https://download.pytorch.org/whl/cpu` instead of `pip install torch` we can specify we want to install a CPU-only version of PyTorch without any GPU dependencies. This reduces the size of the Docker image from 7.32 GB to 1.62 GB --- .devops/full.Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.devops/full.Dockerfile b/.devops/full.Dockerfile index 2b3a20c63..a75bc976f 100644 --- a/.devops/full.Dockerfile +++ b/.devops/full.Dockerfile @@ -6,7 +6,8 @@ RUN apt-get update && \ apt-get install -y build-essential python3 python3-pip RUN pip install --upgrade pip setuptools wheel \ - && pip install numpy requests sentencepiece torch tqdm + && pip install numpy requests sentencepiece tqdm \ + && pip install torch --index-url https://download.pytorch.org/whl/cpu WORKDIR /app
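
Usage note for the KV cache accessors added in PATCH 74/76 above: the patch only declares the getters/setters, so the following is a minimal sketch of how a caller might snapshot and later restore the context state with them. It assumes a valid `struct llama_context *` created elsewhere; the `kv_snapshot` struct and the `kv_save`/`kv_restore` helpers are illustrative names, not part of the patch series.

```cpp
// Sketch only: snapshot/restore helpers built on the accessors from PATCH 74/76.
// Assumes `ctx` is a valid, fully initialized llama_context obtained elsewhere.
#include <cstdint>
#include <vector>
#include "llama.h"

struct kv_snapshot {
    std::vector<uint8_t> buf;      // raw copy of the kv_self buffer
    int                  n_tokens = 0;
};

static kv_snapshot kv_save(struct llama_context * ctx) {
    kv_snapshot snap;
    const uint8_t * data = llama_get_kv_cache(ctx);
    const size_t    size = llama_get_kv_cache_size(ctx);
    snap.buf.assign(data, data + size);
    snap.n_tokens = llama_get_kv_cache_token_count(ctx);
    return snap;
}

static void kv_restore(struct llama_context * ctx, const kv_snapshot & snap) {
    // llama_set_kv_cache asserts that the buffer size matches the context's own,
    // so the snapshot must come from an identically configured context.
    llama_set_kv_cache(ctx, snap.buf.data(), snap.buf.size(), snap.n_tokens);
}
```

Persisting `snap.buf` and `snap.n_tokens` to disk between the two calls is what makes the "load/save history" use case from the commit message possible.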