Merge branch 'master' of https://github.com/ggerganov/llama.cpp
Commit: ddafa03a30
199 changed files with 73633 additions and 49017 deletions
@@ -2,6 +2,14 @@ ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04

FROM intel/oneapi-basekit:$ONEAPI_VERSION as build

+ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
+     echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
+     chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
+     rm /etc/apt/sources.list.d/intel-graphics.list && \
+     wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
+     echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
+     chmod 644 /usr/share/keyrings/intel-graphics.gpg
+
ARG LLAMA_SYCL_F16=OFF

RUN apt-get update && \
    apt-get install -y git
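The build stage above targets the Intel oneAPI base image, and the LLAMA_SYCL_F16 argument toggles FP16 SYCL kernels. A minimal local build sketch; the Dockerfile path and image tag are assumptions, since this excerpt does not name the file:

    # assumed file name and tag; substitute the actual .devops/ Dockerfile
    docker build -f .devops/main-intel.Dockerfile --build-arg LLAMA_SYCL_F16=ON -t llama-cpp-sycl .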
@@ -214,7 +214,6 @@ effectiveStdenv.mkDerivation (
        (cmakeBool "LLAMA_CUDA" useCuda)
        (cmakeBool "LLAMA_HIPBLAS" useRocm)
        (cmakeBool "LLAMA_METAL" useMetalKit)
-       (cmakeBool "LLAMA_MPI" useMpi)
        (cmakeBool "LLAMA_VULKAN" useVulkan)
        (cmakeBool "LLAMA_STATIC" enableStatic)
      ]
@@ -227,20 +226,20 @@ effectiveStdenv.mkDerivation (
        )
      ]
      ++ optionals useRocm [
-       (cmakeFeature "CMAKE_C_COMPILER" "hipcc")
-       (cmakeFeature "CMAKE_CXX_COMPILER" "hipcc")
-
-       # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM
-       # in https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt
-       # and select the line that matches the current nixpkgs version of rocBLAS.
-       # Should likely use `rocmPackages.clr.gpuTargets`.
-       "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
+       (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.llvm.clang}/bin/clang")
+       (cmakeFeature "CMAKE_HIP_ARCHITECTURES" (builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets))
      ]
      ++ optionals useMetalKit [
        (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
        (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" (!precompileMetalShaders))
      ];

+   # Environment variables needed for ROCm
+   env = optionals useRocm {
+     ROCM_PATH = "${rocmPackages.clr}";
+     HIP_DEVICE_LIB_PATH = "${rocmPackages.rocm-device-libs}/amdgcn/bitcode";
+   };
+
    # TODO(SomeoneSerge): It's better to add proper install targets at the CMake level,
    # if they haven't been added yet.
    postInstall = ''
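Outside of Nix, the ROCm-related switches set above map onto a plain CMake invocation. A rough sketch under assumptions (the ROCm install prefix and the single GPU target are placeholders, not taken from this diff):

    # assumed ROCm location and one GPU target; mirrors the flags set by the Nix expression above
    export ROCM_PATH=/opt/rocm
    cmake -B build -S . -DLLAMA_HIPBLAS=ON \
        -DCMAKE_HIP_COMPILER="$ROCM_PATH/llvm/bin/clang" \
        -DCMAKE_HIP_ARCHITECTURES=gfx1100
    cmake --build build --config Release -j "$(nproc)"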
@@ -2,6 +2,14 @@ ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04

FROM intel/oneapi-basekit:$ONEAPI_VERSION as build

+ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
+     echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
+     chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
+     rm /etc/apt/sources.list.d/intel-graphics.list && \
+     wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
+     echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
+     chmod 644 /usr/share/keyrings/intel-graphics.gpg
+
ARG LLAMA_SYCL_F16=OFF

RUN apt-get update && \
    apt-get install -y git libcurl4-openssl-dev
@@ -19,6 +27,14 @@ RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \

FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime

+ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
+     echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
+     chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
+     rm /etc/apt/sources.list.d/intel-graphics.list && \
+     wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
+     echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
+     chmod 644 /usr/share/keyrings/intel-graphics.gpg
+
RUN apt-get update && \
    apt-get install -y libcurl4-openssl-dev
@@ -8,7 +8,7 @@ arg1="$1"
shift

if [[ "$arg1" == '--convert' || "$arg1" == '-c' ]]; then
-     python3 ./convert.py "$@"
+     python3 ./convert-hf-to-gguf.py "$@"
elif [[ "$arg1" == '--quantize' || "$arg1" == '-q' ]]; then
    ./quantize "$@"
elif [[ "$arg1" == '--run' || "$arg1" == '-r' ]]; then
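This entrypoint dispatches on its first argument, so after the change a --convert request runs the HF converter instead of the removed convert.py. A usage sketch; the wrapper script name and the model paths are assumptions, as the file is not named in this excerpt:

    # hypothetical invocation of the entrypoint shown above
    ./tools.sh --convert /models/my-hf-model
    ./tools.sh --quantize /models/model-f16.gguf /models/model-q4_0.gguf q4_0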
.github/ISSUE_TEMPLATE/01-bug-low.yml (new file)
@@ -0,0 +1,50 @@
name: Low Severity Bugs
description: Used to report low severity bugs in llama.cpp (e.g. cosmetic issues, non critical UI glitches)
title: "Bug: "
labels: ["bug-unconfirmed", "low severity"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report!
        Please include information about your system, the steps to reproduce the bug,
        and the version of llama.cpp that you are using.
        If possible, please provide a minimal code example that reproduces the bug.
  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: Also tell us, what did you expect to happen?
      placeholder: Tell us what you see!
    validations:
      required: true
  - type: textarea
    id: version
    attributes:
      label: Name and Version
      description: Which executable and which version of our software are you running? (use `--version` to get a version string)
      placeholder: |
        $./main --version
        version: 2999 (42b4109e)
        built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
    validations:
      required: true
  - type: dropdown
    id: operating-system
    attributes:
      label: What operating system are you seeing the problem on?
      multiple: true
      options:
        - Linux
        - Mac
        - Windows
        - BSD
        - Other? (Please let us know in description)
    validations:
      required: false
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
      render: shell
.github/ISSUE_TEMPLATE/02-bug-medium.yml (new file)
@@ -0,0 +1,50 @@
The body is identical to 01-bug-low.yml above; only the header fields differ:
name: Medium Severity Bug
description: Used to report medium severity bugs in llama.cpp (e.g. Malfunctioning Features but generally still useable)
title: "Bug: "
labels: ["bug-unconfirmed", "medium severity"]
.github/ISSUE_TEMPLATE/03-bug-high.yml (new file)
@@ -0,0 +1,50 @@
The body is identical to 01-bug-low.yml above; only the header fields differ:
name: High Severity Bug
description: Used to report high severity bugs in llama.cpp (e.g. Malfunctioning features hindering important common workflow)
title: "Bug: "
labels: ["bug-unconfirmed", "high severity"]
.github/ISSUE_TEMPLATE/04-bug-critical.yml (new file)
@@ -0,0 +1,50 @@
The body is identical to 01-bug-low.yml above; only the header fields differ:
name: Critical Severity Bug
description: Used to report critical severity bugs in llama.cpp (e.g. Crashing, Corrupted, Dataloss)
title: "Bug: "
labels: ["bug-unconfirmed", "critical severity"]
.github/ISSUE_TEMPLATE/05-enhancement.yml (new file)
@@ -0,0 +1,51 @@
name: Enhancement
description: Used to request enhancements for llama.cpp
title: "Feature Request: "
labels: ["enhancement"]
body:
  - type: markdown
    attributes:
      value: |
        [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggerganov/llama.cpp/discussions/categories/ideas)

  - type: checkboxes
    id: prerequisites
    attributes:
      label: Prerequisites
      description: Please confirm the following before submitting your enhancement request.
      options:
        - label: I am running the latest code. Mention the version if possible as well.
          required: true
        - label: I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md).
          required: true
        - label: I searched using keywords relevant to my issue to make sure that I am creating a new issue that is not already open (or closed).
          required: true
        - label: I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new and useful enhancement to share.
          required: true

  - type: textarea
    id: feature-description
    attributes:
      label: Feature Description
      description: Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do as an enhancement.
      placeholder: Detailed description of the enhancement
    validations:
      required: true

  - type: textarea
    id: motivation
    attributes:
      label: Motivation
      description: Please provide a detailed written description of reasons why this feature is necessary and how it is useful to `llama.cpp` users.
      placeholder: Explanation of why this feature is needed and its benefits
    validations:
      required: true

  - type: textarea
    id: possible-implementation
    attributes:
      label: Possible Implementation
      description: If you have an idea as to how it can be implemented, please write a detailed description. Feel free to give links to external sources or share visuals that might be helpful to understand the details better.
      placeholder: Detailed description of potential implementation
    validations:
      required: false
.github/ISSUE_TEMPLATE/06-research.yml (new file)
@@ -0,0 +1,52 @@
name: Research
description: Track new technical research area
title: "Research: "
labels: ["research 🔬"]
body:
  - type: markdown
    attributes:
      value: |
        Don't forget to check for any [duplicate research issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22)

  - type: checkboxes
    id: research-stage
    attributes:
      label: Research Stage
      description: Track general state of this research ticket
      options:
        - label: Background Research (Let's try to avoid reinventing the wheel)
        - label: Hypothesis Formed (How do you think this will work and it's effect?)
        - label: Strategy / Implementation Forming
        - label: Analysis of results
        - label: Debrief / Documentation (So people in the future can learn from us)

  - type: textarea
    id: background
    attributes:
      label: Previous existing literature and research
      description: Whats the current state of the art and whats the motivation for this research?

  - type: textarea
    id: hypothesis
    attributes:
      label: Hypothesis
      description: How do you think this will work and it's effect?

  - type: textarea
    id: implementation
    attributes:
      label: Implementation
      description: Got an approach? e.g. a PR ready to go?

  - type: textarea
    id: analysis
    attributes:
      label: Analysis
      description: How does the proposed implementation behave?

  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
      render: shell
.github/ISSUE_TEMPLATE/07-refactor.yml (new file)
@@ -0,0 +1,28 @@
name: Refactor (Maintainers)
description: Used to track refactoring opportunities
title: "Refactor: "
labels: ["refactor"]
body:
  - type: markdown
    attributes:
      value: |
        Don't forget to [check for existing refactor issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered.
        Also you may want to check [Pull request refactor label as well](https://github.com/ggerganov/llama.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too.

  - type: textarea
    id: background-description
    attributes:
      label: Background Description
      description: Please provide a detailed written description of the pain points you are trying to solve.
      placeholder: Detailed description behind your motivation to request refactor
    validations:
      required: true

  - type: textarea
    id: possible-approaches
    attributes:
      label: Possible Refactor Approaches
      description: If you have some idea of possible approaches to solve this problem. You may want to make it a todo list.
      placeholder: Your idea of possible refactoring opportunity/approaches
    validations:
      required: false
.github/ISSUE_TEMPLATE/bug.md (deleted)
@@ -1,11 +0,0 @@
---
name: Bug template
about: Used to report bugs in llama.cpp
labels: ["bug-unconfirmed"]
assignees: ''

---

Please include information about your system, the steps to reproduce the bug, and the version of llama.cpp that you are using. If possible, please provide a minimal code example that reproduces the bug.

If the bug concerns the server, please try to reproduce it first using the [server test scenario framework](https://github.com/ggerganov/llama.cpp/tree/master/examples/server/tests).
.github/ISSUE_TEMPLATE/config.yml (new file)
@@ -0,0 +1,13 @@
blank_issues_enabled: true
contact_links:
  - name: Got an idea?
    url: https://github.com/ggerganov/llama.cpp/discussions/categories/ideas
    about: Pop it there. It may then become an enhancement ticket.
  - name: Got a question?
    url: https://github.com/ggerganov/llama.cpp/discussions/categories/q-a
    about: Ask a question there!
  - name: Want to contribute?
    url: https://github.com/ggerganov/llama.cpp/wiki/contribute
    about: Head to the contribution guide page of the wiki for areas you can help with
.github/ISSUE_TEMPLATE/enhancement.md (deleted)
@@ -1,28 +0,0 @@
---
name: Enhancement template
about: Used to request enhancements for llama.cpp
labels: ["enhancement"]
assignees: ''

---

# Prerequisites

Please answer the following questions for yourself before submitting an issue.

- [ ] I am running the latest code. Development is very rapid so there are no tagged versions as of now.
- [ ] I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md).
- [ ] I [searched using keywords relevant to my issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) to make sure that I am creating a new issue that is not already open (or closed).
- [ ] I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new bug or useful enhancement to share.

# Feature Description

Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do as an enhancement.

# Motivation

Please provide a detailed written description of reasons why this feature is necessary and how it is useful to `llama.cpp` users.

# Possible Implementation

If you have an idea as to how it can be implemented, please write a detailed description. Feel free to give links to external sources or share visuals that might be helpful to understand the details better.
.github/labeler.yml (new file)
@@ -0,0 +1,90 @@
# https://github.com/actions/labeler
Kompute:
  - changed-files:
      - any-glob-to-any-file:
          - ggml-kompute.h
          - ggml-kompute.cpp
          - README-kompute.md
Apple Metal:
  - changed-files:
      - any-glob-to-any-file:
          - ggml-metal.h
          - ggml-metal.cpp
          - README-metal.md
SYCL:
  - changed-files:
      - any-glob-to-any-file:
          - ggml-sycl.h
          - ggml-sycl.cpp
          - README-sycl.md
Nvidia GPU:
  - changed-files:
      - any-glob-to-any-file:
          - ggml-cuda.h
          - ggml-cuda/**
Vulkan:
  - changed-files:
      - any-glob-to-any-file:
          - ggml_vk_generate_shaders.py
          - ggml-vulkan*
documentation:
  - changed-files:
      - any-glob-to-any-file:
          - docs/**
          - media/**
testing:
  - changed-files:
      - any-glob-to-any-file:
          - tests/**
build:
  - changed-files:
      - any-glob-to-any-file:
          - cmake/**
          - CMakeLists.txt
          - CMakePresets.json
          - codecov.yml
examples:
  - changed-files:
      - any-glob-to-any-file: examples/**
devops:
  - changed-files:
      - any-glob-to-any-file:
          - .devops/**
          - .github/**
          - ci/**
python:
  - changed-files:
      - any-glob-to-any-file:
          - "**/*.py"
          - requirements/**
          - gguf-py/**
          - .flake8
script:
  - changed-files:
      - any-glob-to-any-file:
          - scripts/**
android:
  - changed-files:
      - any-glob-to-any-file:
          - examples/llama.android/**
server:
  - changed-files:
      - any-glob-to-any-file:
          - examples/server/**
ggml:
  - changed-files:
      - any-glob-to-any-file:
          - ggml.c
          - ggml.h
          - ggml-*.c
          - ggml-*.h
          - ggml-cuda/**
nix:
  - changed-files:
      - any-glob-to-any-file:
          - "**/*.nix"
          - .github/workflows/nix-*.yml
          - .devops/nix/nixpkgs-instances.nix
embedding:
  - changed-files:
      - any-glob-to-any-file: examples/embedding/
.github/workflows/build.yml (208 changes)
@@ -271,49 +271,15 @@ jobs:
          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.zip
          name: llama-bin-ubuntu-x64.zip

-   # ubuntu-latest-cmake-sanitizer:
-   #   runs-on: ubuntu-latest
-   #
-   #   continue-on-error: true
-   #
-   #   strategy:
-   #     matrix:
-   #       sanitizer: [ADDRESS, THREAD, UNDEFINED]
-   #       build_type: [Debug, Release]
-   #
-   #   steps:
-   #     - name: Clone
-   #       id: checkout
-   #       uses: actions/checkout@v4
-   #
-   #     - name: Dependencies
-   #       id: depends
-   #       run: |
-   #         sudo apt-get update
-   #         sudo apt-get install build-essential
-   #
-   #     - name: Build
-   #       id: cmake_build
-   #       run: |
-   #         mkdir build
-   #         cd build
-   #         cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
-   #         cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
-   #
-   #     - name: Test
-   #       id: cmake_test
-   #       run: |
-   #         cd build
-   #         ctest -L main --verbose --timeout 900
-
-   ubuntu-latest-cmake-mpi:
+   ubuntu-latest-cmake-sanitizer:
    runs-on: ubuntu-latest

    continue-on-error: true

    strategy:
      matrix:
-       mpi_library: [mpich, libopenmpi-dev]
+       sanitizer: [ADDRESS, THREAD, UNDEFINED]
+       build_type: [Debug, Release]

    steps:
      - name: Clone
@@ -324,14 +290,44 @@ jobs:
        id: depends
        run: |
          sudo apt-get update
-         sudo apt-get install build-essential ${{ matrix.mpi_library }}
+         sudo apt-get install build-essential

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
-         cmake -DLLAMA_MPI=ON ..
+         cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+         cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
+
+     - name: Test
+       id: cmake_test
+       run: |
+         cd build
+         ctest -L main --verbose --timeout 900
+
+ ubuntu-latest-cmake-rpc:
+   runs-on: ubuntu-latest
+
+   continue-on-error: true
+
+   steps:
+     - name: Clone
+       id: checkout
+       uses: actions/checkout@v4
+
+     - name: Dependencies
+       id: depends
+       run: |
+         sudo apt-get update
+         sudo apt-get install build-essential
+
+     - name: Build
+       id: cmake_build
+       run: |
+         mkdir build
+         cd build
+         cmake -DLLAMA_RPC=ON ..
          cmake --build . --config Release -j $(nproc)

      - name: Test
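The restored sanitizer job can be reproduced locally with the same configure flags it uses; a sketch picking one sanitizer and one build type from the matrix above:

    mkdir build && cd build
    cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_ADDRESS=ON -DCMAKE_BUILD_TYPE=Debug
    cmake --build . --config Debug -j "$(nproc)"
    ctest -L main --verbose --timeout 900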
@@ -362,6 +358,33 @@ jobs:
          cmake -DLLAMA_VULKAN=ON ..
          cmake --build . --config Release -j $(nproc)

+ ubuntu-22-cmake-hip:
+   runs-on: ubuntu-22.04
+   container: rocm/dev-ubuntu-22.04:6.0.2
+
+   steps:
+     - name: Clone
+       id: checkout
+       uses: actions/checkout@v3
+
+     - name: Dependencies
+       id: depends
+       run: |
+         sudo apt-get update
+         sudo apt-get install -y build-essential git cmake rocblas-dev hipblas-dev
+
+     - name: Build with native CMake HIP support
+       id: cmake_build
+       run: |
+         cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DLLAMA_HIPBLAS=ON
+         cmake --build build --config Release -j $(nproc)
+
+     - name: Build with legacy HIP support
+       id: cmake_build_legacy_hip
+       run: |
+         cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DLLAMA_HIPBLAS=ON
+         cmake --build build2 --config Release -j $(nproc)
+
  ubuntu-22-cmake-sycl:
    runs-on: ubuntu-22.04
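Away from CI, the two HIP build paths added above correspond to the following commands on a machine with the ROCm toolchain installed (a sketch mirroring the workflow steps; the build directory names are arbitrary):

    # native CMake HIP support
    cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DLLAMA_HIPBLAS=ON
    cmake --build build --config Release -j "$(nproc)"

    # legacy route with hipcc as the C/C++ compiler
    cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DLLAMA_HIPBLAS=ON
    cmake --build build2 --config Release -j "$(nproc)"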
@@ -663,24 +686,28 @@ jobs:
    strategy:
      matrix:
        include:
-         - build: 'noavx'
+         - build: 'rpc-x64'
+           defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_RPC=ON -DBUILD_SHARED_LIBS=ON'
+         - build: 'noavx-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DBUILD_SHARED_LIBS=ON'
-         - build: 'avx2'
+         - build: 'avx2-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
-         - build: 'avx'
+         - build: 'avx-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
-         - build: 'avx512'
+         - build: 'avx512-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
-         - build: 'clblast'
+         - build: 'clblast-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CLBLAST=ON -DBUILD_SHARED_LIBS=ON -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/clblast"'
-         - build: 'openblas'
+         - build: 'openblas-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
-         - build: 'kompute'
+         - build: 'kompute-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON'
-         - build: 'vulkan'
+         - build: 'vulkan-x64'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_VULKAN=ON -DBUILD_SHARED_LIBS=ON'
-         - build: 'arm64'
-           defines: '-A ARM64 -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
+         - build: 'llvm-arm64'
+           defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
+         - build: 'msvc-arm64'
+           defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'

    steps:
      - name: Clone
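The renamed ARM64 entries switch from the MSVC -A ARM64 generator switch to explicit CMake toolchain files. Locally, the llvm-arm64 configuration corresponds roughly to the following, run from a Windows shell with Ninja and LLVM available (a sketch built from the defines above, not a documented command):

    cmake -B build -S . -G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON
    cmake --build build --config Release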
@@ -691,13 +718,13 @@ jobs:

      - name: Clone Kompute submodule
        id: clone_kompute
-       if: ${{ matrix.build == 'kompute' }}
+       if: ${{ matrix.build == 'kompute-x64' }}
        run: |
          git submodule update --init kompute

      - name: Download OpenCL SDK
        id: get_opencl
-       if: ${{ matrix.build == 'clblast' }}
+       if: ${{ matrix.build == 'clblast-x64' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/opencl.zip -L "https://github.com/KhronosGroup/OpenCL-SDK/releases/download/v${env:OPENCL_VERSION}/OpenCL-SDK-v${env:OPENCL_VERSION}-Win-x64.zip"
          mkdir $env:RUNNER_TEMP/opencl
@@ -705,7 +732,7 @@ jobs:

      - name: Download CLBlast
        id: get_clblast
-       if: ${{ matrix.build == 'clblast' }}
+       if: ${{ matrix.build == 'clblast-x64' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/clblast.7z -L "https://github.com/CNugteren/CLBlast/releases/download/${env:CLBLAST_VERSION}/CLBlast-${env:CLBLAST_VERSION}-windows-x64.7z"
          curl.exe -o $env:RUNNER_TEMP/CLBlast.LICENSE.txt -L "https://github.com/CNugteren/CLBlast/raw/${env:CLBLAST_VERSION}/LICENSE"
@@ -718,7 +745,7 @@ jobs:

      - name: Download OpenBLAS
        id: get_openblas
-       if: ${{ matrix.build == 'openblas' }}
+       if: ${{ matrix.build == 'openblas-x64' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/openblas.zip -L "https://github.com/xianyi/OpenBLAS/releases/download/v${env:OPENBLAS_VERSION}/OpenBLAS-${env:OPENBLAS_VERSION}-x64.zip"
          curl.exe -o $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt -L "https://github.com/xianyi/OpenBLAS/raw/v${env:OPENBLAS_VERSION}/LICENSE"
@@ -731,38 +758,41 @@ jobs:

      - name: Install Vulkan SDK
        id: get_vulkan
-       if: ${{ matrix.build == 'kompute' || matrix.build == 'vulkan' }}
+       if: ${{ matrix.build == 'kompute-x64' || matrix.build == 'vulkan-x64' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe"
          & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
          Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
          Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"

+     - name: Install Ninja
+       id: install_ninja
+       run: |
+         choco install ninja
+
      - name: Build
        id: cmake_build
        run: |
-         mkdir build
-         cd build
-         cmake .. ${{ matrix.defines }}
-         cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS}
+         cmake -S . -B build ${{ matrix.defines }}
+         cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS}

      - name: Add clblast.dll
        id: add_clblast_dll
-       if: ${{ matrix.build == 'clblast' }}
+       if: ${{ matrix.build == 'clblast-x64' }}
        run: |
          cp $env:RUNNER_TEMP/clblast/lib/clblast.dll ./build/bin/Release
          cp $env:RUNNER_TEMP/CLBlast.LICENSE.txt ./build/bin/Release/CLBlast-${env:CLBLAST_VERSION}.txt

      - name: Add libopenblas.dll
        id: add_libopenblas_dll
-       if: ${{ matrix.build == 'openblas' }}
+       if: ${{ matrix.build == 'openblas-x64' }}
        run: |
          cp $env:RUNNER_TEMP/openblas/bin/libopenblas.dll ./build/bin/Release/openblas.dll
          cp $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt ./build/bin/Release/OpenBLAS-${env:OPENBLAS_VERSION}.txt

      - name: Check AVX512F support
        id: check_avx512f
-       if: ${{ matrix.build == 'avx512' }}
+       if: ${{ matrix.build == 'avx512-x64' }}
        continue-on-error: true
        run: |
          cd build
@@ -776,14 +806,14 @@ jobs:
      - name: Test
        id: cmake_test
        # not all machines have native AVX-512
-       if: ${{ matrix.build != 'arm64' && matrix.build != 'clblast' && matrix.build != 'kompute' && matrix.build != 'vulkan' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }}
+       if: ${{ matrix.build != 'msvc-arm64' && matrix.build != 'llvm-arm64' && matrix.build != 'clblast-x64' && matrix.build != 'kompute-x64' && matrix.build != 'vulkan-x64' && (matrix.build != 'avx512-x64' || env.HAS_AVX512F == '1') }}
        run: |
          cd build
          ctest -L main -C Release --verbose --timeout 900

      - name: Test (Intel SDE)
        id: cmake_test_sde
-       if: ${{ matrix.build == 'avx512' && env.HAS_AVX512F == '0' }} # use Intel SDE for AVX-512 emulation
+       if: ${{ matrix.build == 'avx512-x64' && env.HAS_AVX512F == '0' }} # use Intel SDE for AVX-512 emulation
        run: |
          curl.exe -o $env:RUNNER_TEMP/sde.tar.xz -L "https://downloadmirror.intel.com/813591/sde-external-${env:SDE_VERSION}-win.tar.xz"
          # for some weird reason windows tar doesn't like sde tar.xz
@@ -811,14 +841,14 @@ jobs:
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        run: |
          Copy-Item LICENSE .\build\bin\Release\llama.cpp.txt
-         7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip .\build\bin\Release\*
+         7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip .\build\bin\Release\*

      - name: Upload artifacts
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: actions/upload-artifact@v4
        with:
-         path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip
-         name: llama-bin-win-${{ matrix.build }}-x64.zip
+         path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip
+         name: llama-bin-win-${{ matrix.build }}.zip

  windows-latest-cmake-cuda:
    runs-on: windows-latest
@@ -898,9 +928,9 @@ jobs:
    shell: bash

    env:
-     WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/62641e01-1e8d-4ace-91d6-ae03f7f8a71f/w_BaseKit_p_2024.0.0.49563_offline.exe
+     WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7dff44ba-e3af-4448-841c-0d616c8da6e7/w_BaseKit_p_2024.1.0.595_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel
+     ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"
    steps:
      - name: Clone
        id: checkout
@@ -932,6 +962,17 @@ jobs:
        id: pack_artifacts
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        run: |
+         echo "cp oneAPI running time dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin"
+         cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.4.dll" ./build/bin
+         cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin
+         cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin
+
+         cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/pi_win_proxy_loader.dll" ./build/bin
+         cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/pi_level_zero.dll" ./build/bin
+         cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl7.dll" ./build/bin
+         cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
+         cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
+         echo "cp oneAPI running time dll files to ./build/bin done"
          7z a llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip ./build/bin/*

      - name: Upload artifacts
@@ -941,6 +982,37 @@ jobs:
          path: llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip
          name: llama-bin-win-sycl-x64.zip

+ windows-latest-cmake-hip:
+   runs-on: windows-latest
+
+   steps:
+     - name: Clone
+       id: checkout
+       uses: actions/checkout@v3
+
+     - name: Install
+       id: depends
+       run: |
+         $ErrorActionPreference = "Stop"
+         write-host "Downloading AMD HIP SDK Installer"
+         Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
+         write-host "Installing AMD HIP SDK"
+         Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
+         write-host "Completed AMD HIP SDK installation"
+
+     - name: Verify ROCm
+       id: verify
+       run: |
+         & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
+
+     - name: Build
+       id: cmake_build
+       run: |
+         $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
+         $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
+         cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DLLAMA_HIPBLAS=ON
+         cmake --build build --config Release
+
  ios-xcode-build:
    runs-on: macos-latest
.github/workflows/labeler.yml (new file)
@@ -0,0 +1,17 @@
name: "Pull Request Labeler"
on:
  - pull_request_target

jobs:
  labeler:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          repository: "ggerganov/llama.cpp"
      - uses: actions/labeler@v5
        with:
          configuration-path: '.github/labeler.yml'
.github/workflows/server.yml (8 changes)
@@ -32,10 +32,8 @@ jobs:
    strategy:
      matrix:
-       # TODO: temporary disabled due to linux kernel issues
-       #sanitizer: [ADDRESS, THREAD, UNDEFINED]
-       sanitizer: [UNDEFINED]
-       build_type: [Debug]
+       sanitizer: [ADDRESS, THREAD, UNDEFINED]
+       build_type: [RelWithDebInfo]
        include:
          - build_type: Release
            sanitizer: ""
@@ -102,10 +100,8 @@ jobs:
              -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ;
          cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target server

      - name: Tests
        id: server_integration_tests
-       if: ${{ !matrix.disabled_on_pr || !github.event.pull_request }}
        run: |
          cd examples/server/tests
          PORT=8888 ./tests.sh
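The integration tests exercised by this workflow can also be run locally with the same commands the job uses (assumes the server target has already been built):

    cd examples/server/tests
    PORT=8888 ./tests.sh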
.github/workflows/zig-build.yml (deleted)
@@ -1,29 +0,0 @@
name: Zig CI

on:
  pull_request:
  push:
    branches:
      - master

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        runs-on: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.runs-on }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
          fetch-depth: 0
      - uses: goto-bus-stop/setup-zig@v2
        with:
          version: 0.11.0
      - name: Build Summary
        run: zig build --summary all -freference-trace
CMakeLists.txt (133 changes)
@@ -72,11 +72,13 @@ else()
    set(INS_ENB ON)
endif()

+ option(LLAMA_SVE "llama: enable SVE" OFF)
option(LLAMA_AVX "llama: enable AVX" ${INS_ENB})
option(LLAMA_AVX2 "llama: enable AVX2" ${INS_ENB})
option(LLAMA_AVX512 "llama: enable AVX512" OFF)
option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
+ option(LLAMA_AVX512_BF16 "llama: enable AVX512-BF16" OFF)
option(LLAMA_FMA "llama: enable FMA" ${INS_ENB})
# in MSVC F16C is implied with AVX2/AVX512
if (NOT MSVC)
@@ -122,8 +124,7 @@ set(LLAMA_METAL_MACOSX_VERSION_MIN "" CACHE STRING
                                       "llama: metal minimum macOS version")
set(LLAMA_METAL_STD "" CACHE STRING "llama: metal standard version (-std flag)")
option(LLAMA_KOMPUTE "llama: use Kompute" OFF)
- option(LLAMA_MPI "llama: use MPI" OFF)
- option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF)
+ option(LLAMA_RPC "llama: use RPC" OFF)
option(LLAMA_SYCL "llama: use SYCL" OFF)
option(LLAMA_SYCL_F16 "llama: use 16 bit floats for sycl calculations" OFF)
set(LLAMA_SYCL_TARGET "INTEL" CACHE STRING "llama: sycl target device")
@@ -133,6 +134,8 @@ set(LLAMA_SCHED_MAX_COPIES "4" CACHE STRING "llama: max input copies for pipeli
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_SERVER "llama: build server example" ON)
+ option(LLAMA_LASX "llama: enable lasx" ON)
+ option(LLAMA_LSX "llama: enable lsx" ON)

# add perf arguments
option(LLAMA_PERF "llama: enable perf" OFF)
@@ -296,7 +299,7 @@ if (LLAMA_BLAS)
    if (LLAMA_STATIC)
        set(BLA_STATIC ON)
    endif()
-     if ($(CMAKE_VERSION) VERSION_GREATER_EQUAL 3.22)
+     if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.22)
        set(BLA_SIZEOF_INTEGER 8)
    endif()
@@ -381,10 +384,6 @@ if (LLAMA_LLAMAFILE)
    set(GGML_SOURCES_LLAMAFILE sgemm.cpp)
endif()

- if (LLAMA_QKK_64)
-     add_compile_definitions(GGML_QKK_64)
- endif()
-
if (LLAMA_CUBLAS)
    message(WARNING "LLAMA_CUBLAS is deprecated and will be removed in the future.\nUse LLAMA_CUDA instead")
    set(LLAMA_CUDA ON)
@@ -405,6 +404,7 @@ if (LLAMA_CUDA)
    list(APPEND GGML_SOURCES_CUDA "ggml-cuda.cu")

    add_compile_definitions(GGML_USE_CUDA)
+     add_compile_definitions(GGML_CUDA_USE_GRAPHS)
    if (LLAMA_CUDA_FORCE_DMMV)
        add_compile_definitions(GGML_CUDA_FORCE_DMMV)
    endif()
@@ -430,7 +430,7 @@ if (LLAMA_CUDA)

    if (LLAMA_STATIC)
        if (WIN32)
-             # As of 12.3.1 CUDA Tookit for Windows does not offer a static cublas library
+             # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt)
        else ()
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
@ -464,33 +464,15 @@ if (LLAMA_CUDA)
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (LLAMA_MPI)
|
if (LLAMA_RPC)
|
||||||
cmake_minimum_required(VERSION 3.10)
|
add_compile_definitions(GGML_USE_RPC)
|
||||||
find_package(MPI)
|
|
||||||
if (MPI_C_FOUND)
|
|
||||||
message(STATUS "MPI found")
|
|
||||||
|
|
||||||
set(GGML_HEADERS_MPI ggml-mpi.h)
|
if (WIN32)
|
||||||
set(GGML_SOURCES_MPI ggml-mpi.c)
|
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ws2_32)
|
||||||
|
|
||||||
add_compile_definitions(GGML_USE_MPI)
|
|
||||||
add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS})
|
|
||||||
|
|
||||||
if (NOT MSVC)
|
|
||||||
add_compile_options(-Wno-cast-qual)
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${MPI_C_LIBRARIES})
|
set(GGML_HEADERS_RPC ggml-rpc.h)
|
||||||
set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${MPI_C_INCLUDE_DIRS})
|
set(GGML_SOURCES_RPC ggml-rpc.cpp)
|
||||||
|
|
||||||
# Even if you're only using the C header, C++ programs may bring in MPI
|
|
||||||
# C++ functions, so more linkage is needed
|
|
||||||
if (MPI_CXX_FOUND)
|
|
||||||
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${MPI_CXX_LIBRARIES})
|
|
||||||
endif()
|
|
||||||
else()
|
|
||||||
message(WARNING "MPI not found")
|
|
||||||
endif()
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (LLAMA_CLBLAST)
|
if (LLAMA_CLBLAST)
|
||||||
|
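The hunk above drops the old `LLAMA_MPI` block and wires in the new RPC backend instead (`LLAMA_RPC`, `ggml-rpc.h`/`ggml-rpc.cpp`). A minimal, hedged sketch of turning it on at configure time; only the option name is taken from this diff, the surrounding commands are standard CMake usage also shown in the README changes later in this commit:

```bash
# enable the RPC backend introduced by this change (it defaults to OFF)
cmake -B build -DLLAMA_RPC=ON
cmake --build build --config Release
```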
@@ -519,6 +501,12 @@ if (LLAMA_VULKAN)

     add_compile_definitions(GGML_USE_VULKAN)

+    # Workaround to the "can't dereference invalidated vector iterator" bug in clang-cl debug build
+    # Posssibly relevant: https://stackoverflow.com/questions/74748276/visual-studio-no-displays-the-correct-length-of-stdvector
+    if (MSVC AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+        add_compile_definitions(_ITERATOR_DEBUG_LEVEL=0)
+    endif()
+
     if (LLAMA_VULKAN_CHECK_RESULTS)
         add_compile_definitions(GGML_VULKAN_CHECK_RESULTS)
     endif()
@@ -542,16 +530,37 @@ if (LLAMA_VULKAN)
     endif()
 endif()

 if (LLAMA_HIPBLAS)
-    list(APPEND CMAKE_PREFIX_PATH /opt/rocm)
+    if ($ENV{ROCM_PATH})
+        set(ROCM_PATH $ENV{ROCM_PATH})
+    else()
+        set(ROCM_PATH /opt/rocm)
+    endif()
+    list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH})

-    if (NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang")
-        message(WARNING "Only LLVM is supported for HIP, hint: CC=/opt/rocm/llvm/bin/clang")
+    # CMake on Windows doesn't support the HIP language yet
+    if(WIN32)
+        set(CXX_IS_HIPCC TRUE)
+    else()
+        string(REGEX MATCH "hipcc(\.bat)?$" CXX_IS_HIPCC "${CMAKE_CXX_COMPILER}")
     endif()

+    if(CXX_IS_HIPCC)
+        if(LINUX)
             if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
                 message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++")
             endif()

+            message(WARNING "Setting hipcc as the C++ compiler is legacy behavior."
+                    " Prefer setting the HIP compiler directly. See README for details.")
+        endif()
+    else()
+        # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES.
+        if(AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES)
+            set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_TARGETS})
+        endif()
+        cmake_minimum_required(VERSION 3.21)
+        enable_language(HIP)
+    endif()
     find_package(hip REQUIRED)
     find_package(hipblas REQUIRED)
     find_package(rocblas REQUIRED)
@@ -585,13 +594,18 @@ if (LLAMA_HIPBLAS)
     add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
     add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})

+    if (CXX_IS_HIPCC)
         set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)
+        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device)
+    else()
+        set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP)
+    endif()

     if (LLAMA_STATIC)
         message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
     endif()

-    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device PUBLIC hip::host roc::rocblas roc::hipblas)
+    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} PUBLIC hip::host roc::rocblas roc::hipblas)
 endif()

 if (LLAMA_SYCL)
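Taken together, the HIPBLAS hunks above let CMake drive the HIP compiler directly (via `enable_language(HIP)` and `CMAKE_HIP_ARCHITECTURES`) while keeping the old hipcc-as-CXX route as a warned-about fallback. A hedged sketch of the two configure paths: the first command also appears verbatim in the README changes later in this commit, while the hipcc path below is an assumption based on the `ROCM_PATH` defaults visible in this diff:

```bash
# preferred: let CMake use the HIP language support added here
HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
    cmake -S . -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release

# legacy: setting hipcc as the C++ compiler still works but now prints a warning
CXX=/opt/rocm/bin/hipcc cmake -S . -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030
```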
@@ -614,6 +628,10 @@ if (LLAMA_SYCL)
         add_compile_definitions(GGML_SYCL_F16)
     endif()

+    if (LLAMA_CUDA_FORCE_MMQ)
+        add_compile_definitions(GGML_SYCL_FORCE_MMQ)
+    endif()
+
     add_compile_options(-I./) #include DPCT
     add_compile_options(-I/${SYCL_INCLUDE_DIR})

@@ -994,6 +1012,11 @@ if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STR
         if (GGML_COMPILER_SUPPORT_DOTPROD)
             add_compile_definitions(__ARM_FEATURE_DOTPROD)
         endif ()
+        check_cxx_source_compiles("#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vmlaq_f32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_MATMUL_INT8)
+        if (GGML_COMPILER_SUPPORT_MATMUL_INT8)
+            add_compile_definitions(__ARM_FEATURE_MATMUL_INT8)
+        endif ()
+
         check_cxx_source_compiles("#include <arm_neon.h>\nint main() { float16_t _a; float16x8_t _s = vdupq_n_f16(_a); return 0; }" GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
         if (GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
             add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
@@ -1022,6 +1045,9 @@ if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STR
             # Raspberry Pi 3, 4, Zero 2 (32-bit)
             list(APPEND ARCH_FLAGS -mno-unaligned-access)
         endif()
+        if (LLAMA_SVE)
+            list(APPEND ARCH_FLAGS -march=armv8.6-a+sve)
+        endif()
     endif()
 elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR
         (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND
@@ -1046,6 +1072,10 @@ elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LW
                 add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
                 add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
             endif()
+            if (LLAMA_AVX512_BF16)
+                add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512BF16__>)
+                add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512BF16__>)
+            endif()
         elseif (LLAMA_AVX2)
             list(APPEND ARCH_FLAGS /arch:AVX2)
         elseif (LLAMA_AVX)
@@ -1077,6 +1107,9 @@ elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LW
         if (LLAMA_AVX512_VNNI)
             list(APPEND ARCH_FLAGS -mavx512vnni)
         endif()
+        if (LLAMA_AVX512_BF16)
+            list(APPEND ARCH_FLAGS -mavx512bf16)
+        endif()
     endif()
 elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
     message(STATUS "PowerPC detected")
@@ -1086,6 +1119,17 @@ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
         list(APPEND ARCH_FLAGS -mcpu=native -mtune=native)
         #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
     endif()
+elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
+    message(STATUS "loongarch64 detected")
+
+    list(APPEND ARCH_FLAGS -march=loongarch64)
+    if (LLAMA_LASX)
+        list(APPEND ARCH_FLAGS -mlasx)
+    endif()
+    if (LLAMA_LSX)
+        list(APPEND ARCH_FLAGS -mlsx)
+    endif()
+
 else()
     message(STATUS "Unknown architecture")
 endif()
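The architecture hunks above introduce several new CPU feature toggles: `LLAMA_AVX512_BF16`, `LLAMA_SVE`, and the LoongArch `LLAMA_LASX`/`LLAMA_LSX` options. A hedged sketch of passing them at configure time; only the option names and defaults are taken from this diff, the commands themselves are assumed boilerplate:

```bash
cmake -B build -DLLAMA_AVX512_BF16=ON          # x86-64 CPUs with AVX-512 BF16
cmake -B build -DLLAMA_SVE=ON                  # AArch64 CPUs with SVE
cmake -B build -DLLAMA_LASX=ON -DLLAMA_LSX=ON  # loongarch64 (both default to ON)
```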
@@ -1174,7 +1218,7 @@ add_library(ggml OBJECT
             ${GGML_SOURCES_CUDA}    ${GGML_HEADERS_CUDA}
             ${GGML_SOURCES_OPENCL}  ${GGML_HEADERS_OPENCL}
             ${GGML_SOURCES_METAL}   ${GGML_HEADERS_METAL}
-            ${GGML_SOURCES_MPI}     ${GGML_HEADERS_MPI}
+            ${GGML_SOURCES_RPC}     ${GGML_HEADERS_RPC}
             ${GGML_SOURCES_EXTRA}   ${GGML_HEADERS_EXTRA}
             ${GGML_SOURCES_SYCL}    ${GGML_HEADERS_SYCL}
             ${GGML_SOURCES_KOMPUTE} ${GGML_HEADERS_KOMPUTE}
@@ -1261,7 +1305,7 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/LlamaConfig.cmake

 set(GGML_PUBLIC_HEADERS "ggml.h" "ggml-alloc.h" "ggml-backend.h"
     "${GGML_HEADERS_CUDA}" "${GGML_HEADERS_OPENCL}"
-    "${GGML_HEADERS_METAL}" "${GGML_HEADERS_MPI}" "${GGML_HEADERS_EXTRA}")
+    "${GGML_HEADERS_METAL}" "${GGML_HEADERS_EXTRA}")

 set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
 install(TARGETS ggml PUBLIC_HEADER)
@@ -1270,18 +1314,7 @@ set_target_properties(llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}
 install(TARGETS llama LIBRARY PUBLIC_HEADER)

 install(
-    FILES convert.py
+    FILES convert-hf-to-gguf.py
-    PERMISSIONS
-        OWNER_READ
-        OWNER_WRITE
-        OWNER_EXECUTE
-        GROUP_READ
-        GROUP_EXECUTE
-        WORLD_READ
-        WORLD_EXECUTE
-    DESTINATION ${CMAKE_INSTALL_BINDIR})
-install(
-    FILES convert-lora-to-ggml.py
     PERMISSIONS
         OWNER_READ
         OWNER_WRITE
CMakePresets.json (new file, 49 lines)

@@ -0,0 +1,49 @@
+{
+    "version": 4,
+    "configurePresets": [
+        {
+            "name": "base",
+            "hidden": true,
+            "generator": "Ninja",
+            "binaryDir": "${sourceDir}/build-${presetName}",
+            "cacheVariables": {
+                "CMAKE_EXPORT_COMPILE_COMMANDS": "ON",
+                "CMAKE_INSTALL_RPATH": "$ORIGIN;$ORIGIN/.."
+            }
+        },
+
+        { "name": "debug", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "Debug" } },
+        { "name": "release", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "RelWithDebInfo" } },
+        { "name": "static", "hidden": true, "cacheVariables": { "LLAMA_STATIC": "ON" } },
+
+        {
+            "name": "arm64-windows-msvc", "hidden": true,
+            "architecture": { "value": "arm64", "strategy": "external" },
+            "toolset": { "value": "host=x86_64", "strategy": "external" },
+            "cacheVariables": {
+                "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/arm64-windows-msvc.cmake"
+            }
+        },
+
+        {
+            "name": "arm64-windows-llvm", "hidden": true,
+            "architecture": { "value": "arm64", "strategy": "external" },
+            "toolset": { "value": "host=x86_64", "strategy": "external" },
+            "cacheVariables": {
+                "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/arm64-windows-llvm.cmake"
+            }
+        },
+
+        { "name": "arm64-windows-llvm-debug" , "inherits": [ "base", "arm64-windows-llvm", "debug" ] },
+        { "name": "arm64-windows-llvm-release", "inherits": [ "base", "arm64-windows-llvm", "release" ] },
+        { "name": "arm64-windows-llvm+static-release", "inherits": [ "base", "arm64-windows-llvm", "release", "static" ] },
+
+        { "name": "arm64-windows-msvc-debug" , "inherits": [ "base", "arm64-windows-msvc", "debug" ] },
+        { "name": "arm64-windows-msvc-release", "inherits": [ "base", "arm64-windows-msvc", "release" ] },
+        { "name": "arm64-windows-msvc+static-release", "inherits": [ "base", "arm64-windows-msvc", "release", "static" ] },
+
+        { "name": "x64-windows-msvc-debug" , "inherits": [ "base", "debug" ] },
+        { "name": "x64-windows-msvc-release", "inherits": [ "base", "release" ] },
+        { "name": "x64-windows-msvc+static-release", "inherits": [ "base", "release", "static" ] }
+    ]
+}
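The new CMakePresets.json above defines configure presets that can be driven with `cmake --preset`. A brief usage sketch, assuming a CMake release new enough for preset schema version 4 and the Ninja generator requested by the `base` preset:

```bash
# configure and build one of the presets defined above
cmake --preset x64-windows-msvc-release
cmake --build build-x64-windows-msvc-release
```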
Makefile (32 changed lines)

@@ -379,15 +379,16 @@ ifneq ($(filter ppc64le%,$(UNAME_M)),)
         CUDA_POWER_ARCH = 1
     endif
 endif

+ifneq ($(filter loongarch64%,$(UNAME_M)),)
+    MK_CFLAGS   += -mlasx
+    MK_CXXFLAGS += -mlasx
+endif
+
 else
     MK_CFLAGS   += -march=rv64gcv -mabi=lp64d
     MK_CXXFLAGS += -march=rv64gcv -mabi=lp64d
 endif

-ifdef LLAMA_QKK_64
-    MK_CPPFLAGS += -DGGML_QKK_64
-endif
-
 ifndef LLAMA_NO_ACCELERATE
     # Mac OS - include Accelerate framework.
     # `-framework Accelerate` works both with Apple Silicon and Mac Intel
@@ -399,13 +400,6 @@ ifndef LLAMA_NO_ACCELERATE
     endif
 endif # LLAMA_NO_ACCELERATE

-ifdef LLAMA_MPI
-    MK_CPPFLAGS += -DGGML_USE_MPI
-    MK_CFLAGS   += -Wno-cast-qual
-    MK_CXXFLAGS += -Wno-cast-qual
-    OBJS        += ggml-mpi.o
-endif # LLAMA_MPI
-
 ifdef LLAMA_OPENBLAS
     MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas)
     MK_CFLAGS   += $(shell pkg-config --cflags-only-other openblas)
@@ -433,7 +427,7 @@ ifdef LLAMA_CUDA
     else
         CUDA_PATH ?= /usr/local/cuda
     endif
-    MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
+    MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include -DGGML_CUDA_USE_GRAPHS
     MK_LDFLAGS  += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
     OBJS        += ggml-cuda.o
     OBJS        += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
@@ -447,6 +441,9 @@ endif # JETSON_EOL_MODULE_DETECT
 ifdef LLAMA_DEBUG
     MK_NVCCFLAGS += -lineinfo
 endif # LLAMA_DEBUG
+ifdef LLAMA_CUDA_DEBUG
+    MK_NVCCFLAGS += --device-debug
+endif # LLAMA_CUDA_DEBUG
 ifdef LLAMA_CUDA_NVCC
     NVCC = $(CCACHE) $(LLAMA_CUDA_NVCC)
 else
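The hunk above adds an `LLAMA_CUDA_DEBUG` switch that passes `--device-debug` to nvcc. A hedged example of a debug-oriented CUDA build using only Makefile flags that appear in this diff (`LLAMA_CUDA` and `LLAMA_DEBUG` already exist; the job count is arbitrary):

```bash
make LLAMA_CUDA=1 LLAMA_DEBUG=1 LLAMA_CUDA_DEBUG=1 -j 8
```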
@@ -560,10 +557,10 @@ endif # LLAMA_VULKAN
 ifdef LLAMA_HIPBLAS
     ifeq ($(wildcard /opt/rocm),)
         ROCM_PATH      ?= /usr
-        GPU_TARGETS    ?= $(shell $(shell which amdgpu-arch))
+        AMDGPU_TARGETS ?= $(shell $(shell which amdgpu-arch))
     else
         ROCM_PATH      ?= /opt/rocm
-        GPU_TARGETS    ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch)
+        AMDGPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch)
     endif
     HIPCC ?= $(CCACHE) $(ROCM_PATH)/bin/hipcc
     LLAMA_CUDA_DMMV_X ?= 32
@@ -575,7 +572,7 @@ ifdef LLAMA_HIP_UMA
 endif # LLAMA_HIP_UMA
     MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
     MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas
-    HIPFLAGS   += $(addprefix --offload-arch=,$(GPU_TARGETS))
+    HIPFLAGS   += $(addprefix --offload-arch=,$(AMDGPU_TARGETS))
     HIPFLAGS   += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
     HIPFLAGS   += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y)
     HIPFLAGS   += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
@@ -629,11 +626,6 @@ ggml-metal-embed.o: ggml-metal.metal ggml-common.h
     endif
 endif # LLAMA_METAL

-ifdef LLAMA_MPI
-ggml-mpi.o: ggml-mpi.c ggml-mpi.h
-	$(CC) $(CFLAGS) -c $< -o $@
-endif # LLAMA_MPI
-
 ifndef LLAMA_NO_LLAMAFILE
 sgemm.o: sgemm.cpp sgemm.h ggml.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
@@ -55,8 +55,8 @@ It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS,
 ## OS

 | OS      | Status  | Verified                                       |
-|---------|---------|------------------------------------|
+|---------|---------|------------------------------------------------|
-| Linux   | Support | Ubuntu 22.04, Fedora Silverblue 39 |
+| Linux   | Support | Ubuntu 22.04, Fedora Silverblue 39, Arch Linux |
 | Windows | Support | Windows 11                                     |

@@ -70,7 +70,7 @@ It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS,
 |-------------------------------|---------|---------------------------------------|
 | Intel Data Center Max Series  | Support | Max 1550, 1100                        |
 | Intel Data Center Flex Series | Support | Flex 170                              |
-| Intel Arc Series              | Support | Arc 770, 730M                         |
+| Intel Arc Series              | Support | Arc 770, 730M, Arc A750               |
 | Intel built-in Arc GPU        | Support | built-in Arc GPU in Meteor Lake       |
 | Intel iGPU                    | Support | iGPU in i5-1250P, i7-1260P, i7-1165G7 |
README.md (115 changed lines)

@@ -3,6 +3,8 @@
 

 [](https://opensource.org/licenses/MIT)
+[](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml)
+[](https://conan.io/center/llama-cpp)

 [Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml)
@@ -107,7 +109,6 @@ Typically finetunes of the base models below are supported as well.
 - [X] [Aquila 1 & 2](https://huggingface.co/models?search=BAAI/Aquila)
 - [X] [Starcoder models](https://github.com/ggerganov/llama.cpp/pull/3187)
 - [X] [Refact](https://huggingface.co/smallcloudai/Refact-1_6B-fim)
-- [X] [Persimmon 8B](https://github.com/ggerganov/llama.cpp/pull/3410)
 - [X] [MPT](https://github.com/ggerganov/llama.cpp/pull/3417)
 - [X] [Bloom](https://github.com/ggerganov/llama.cpp/pull/3553)
 - [x] [Yi models](https://huggingface.co/models?search=01-ai/Yi)
@@ -128,6 +129,7 @@ Typically finetunes of the base models below are supported as well.
 - [x] [SEA-LION](https://huggingface.co/models?search=sea-lion)
 - [x] [GritLM-7B](https://huggingface.co/GritLM/GritLM-7B) + [GritLM-8x7B](https://huggingface.co/GritLM/GritLM-8x7B)
 - [x] [OLMo](https://allenai.org/olmo)
+- [x] [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) + [Pythia](https://github.com/EleutherAI/pythia)

 (instructions for supporting more models: [HOWTO-add-model.md](./docs/HOWTO-add-model.md))

@@ -140,6 +142,8 @@ Typically finetunes of the base models below are supported as well.
 - [x] [MobileVLM 1.7B/3B models](https://huggingface.co/models?search=mobileVLM)
 - [x] [Yi-VL](https://huggingface.co/models?search=Yi-VL)
 - [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM)
+- [x] [Moondream](https://huggingface.co/vikhyatk/moondream2)
+- [x] [Bunny](https://github.com/BAAI-DCAI/Bunny)

 **HTTP server**

@@ -175,6 +179,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
 - [nat/openplayground](https://github.com/nat/openplayground)
 - [Faraday](https://faraday.dev/) (proprietary)
 - [LMStudio](https://lmstudio.ai/) (proprietary)
+- [Layla](https://play.google.com/store/apps/details?id=com.laylalite) (proprietary)
 - [LocalAI](https://github.com/mudler/LocalAI) (MIT)
 - [LostRuins/koboldcpp](https://github.com/LostRuins/koboldcpp) (AGPL)
 - [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile)
@@ -200,6 +205,10 @@ Unless otherwise noted these projects are open-source with permissive licensing:

 *(to have a project listed here, it should clearly state that it depends on `llama.cpp`)*

+**Tools:**
+
+- [akx/ggify](https://github.com/akx/ggify) – download PyTorch models from HuggingFace Hub and convert them to GGML
+
 ---

 Here is a typical run using LLaMA v2 13B on M2 Ultra:
@@ -299,7 +308,7 @@ cd llama.cpp

 ### Build

-In order to build llama.cpp you have three different options.
+In order to build llama.cpp you have four different options.

 - Using `make`:
   - On Linux or MacOS:
@@ -308,8 +317,6 @@ In order to build llama.cpp you have three different options.
       make
      ```

-      **Note**: for `Debug` builds, run `make LLAMA_DEBUG=1`
-
   - On Windows:

     1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
@@ -321,6 +328,11 @@ In order to build llama.cpp you have three different options.
        make
        ```

+  - Notes:
+    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel.
+    - For faster repeated compilation, install [ccache](https://ccache.dev/).
+    - For debug builds, run `make LLAMA_DEBUG=1`
+
 - Using `CMake`:

   ```bash
@@ -328,16 +340,20 @@ In order to build llama.cpp you have three different options.
   cmake --build build --config Release
   ```

-  **Note**: for `Debug` builds, there are two cases:
+  **Notes**:

-    - Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):
+    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel.
+    - For faster repeated compilation, install [ccache](https://ccache.dev/).
+    - For debug builds, there are two cases:
+
+      1. Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):

       ```bash
       cmake -B build -DCMAKE_BUILD_TYPE=Debug
       cmake --build build
       ```

-    - Multi-config generators (`-G` param set to Visual Studio, XCode...):
+      2. Multi-config generators (`-G` param set to Visual Studio, XCode...):

       ```bash
       cmake -B build -G "Xcode"
@@ -372,6 +388,14 @@ In order to build llama.cpp you have three different options.
   CLBLAST support for use OpenCL GPU acceleration in FreeBSD. Please read
   the instructions for use and activate this options in this document below.

+### Homebrew
+
+On Mac and Linux, the homebrew package manager can be used via
+```
+brew install llama.cpp
+```
+The formula is automatically updated with new `llama.cpp` releases.
+
 ### Metal Build

 On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
@@ -380,45 +404,6 @@ To disable the Metal build at compile time use the `LLAMA_NO_METAL=1` flag or th
 When built with Metal support, you can explicitly disable GPU inference with the `--n-gpu-layers|-ngl 0` command-line
 argument.

-### MPI Build
-
-MPI lets you distribute the computation over a cluster of machines. Because of the serial nature of LLM prediction, this won't yield any end-to-end speed-ups, but it will let you run larger models than would otherwise fit into RAM on a single machine.
-
-First you will need MPI libraries installed on your system. The two most popular (only?) options are [MPICH](https://www.mpich.org) and [OpenMPI](https://www.open-mpi.org). Either can be installed with a package manager (`apt`, Homebrew, MacPorts, etc).
-
-Next you will need to build the project with `LLAMA_MPI` set to true on all machines; if you're building with `make`, you will also need to specify an MPI-capable compiler (when building with CMake, this is configured automatically):
-
-- Using `make`:
-
-  ```bash
-  make CC=mpicc CXX=mpicxx LLAMA_MPI=1
-  ```
-
-- Using `CMake`:
-
-  ```bash
-  cmake -S . -B build -DLLAMA_MPI=ON
-  ```
-
-Once the programs are built, download/convert the weights on all of the machines in your cluster. The paths to the weights and programs should be identical on all machines.
-
-Next, ensure password-less SSH access to each machine from the primary host, and create a `hostfile` with a list of the hostnames and their relative "weights" (slots). If you want to use localhost for computation, use its local subnet IP address rather than the loopback address or "localhost".
-
-Here is an example hostfile:
-
-  ```
-  192.168.0.1:2
-  malvolio.local:1
-  ```
-
-The above will distribute the computation across 2 processes on the first host and 1 process on the second host. Each process will use roughly an equal amount of RAM. Try to keep these numbers small, as inter-process (intra-host) communication is expensive.
-
-Finally, you're ready to run a computation using `mpirun`:
-
-  ```bash
-  mpirun -hostfile hostfile -n 3 ./main -m ./models/7B/ggml-model-q4_0.gguf -n 128
-  ```
-
 ### BLAS Build

 Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Support with CPU-only BLAS implementations doesn't affect the normal generation performance. We may see generation performance improvements with GPU-involved BLAS implementations, e.g. cuBLAS, hipBLAS and CLBlast. There are currently several different BLAS implementations available for build and use:
@@ -510,6 +495,7 @@ Building the program with BLAS support may lead to some performance improvements
 | LLAMA_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
 | LLAMA_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
 | LLAMA_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
+| LLAMA_CUDA_FORCE_MMQ | Boolean | false | Force the use of dequantization + matrix multiplication kernels instead of leveraging Math libraries. |
 | LLAMA_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
 | LLAMA_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
 | LLAMA_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. |
@@ -526,13 +512,28 @@ Building the program with BLAS support may lead to some performance improvements
    ```
  - Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU):
    ```bash
-   CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ \
-   cmake -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
+   HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
+   cmake -S . -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
    && cmake --build build --config Release -- -j 16
    ```
-  On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON"`.
+  On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON`.
   However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).

+  Note that if you get the following error:
+  ```
+  clang: error: cannot find ROCm device library; provide its path via '--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build without ROCm device library
+  ```
+  Try searching for a directory under `HIP_PATH` that contains the file
+  `oclc_abi_version_400.bc`. Then, add the following to the start of the
+  command: `HIP_DEVICE_LIB_PATH=<directory-you-just-found>`, so something
+  like:
+  ```bash
+  HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -p)" \
+  HIP_DEVICE_LIB_PATH=<directory-you-just-found> \
+  cmake -S . -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
+  && cmake --build build -- -j 16
+  ```
+
  - Using `make` (example for target gfx1030, build with 16 CPU threads):
    ```bash
    make -j16 LLAMA_HIPBLAS=1 LLAMA_HIP_UMA=1 AMDGPU_TARGETS=gfx1030
@@ -541,10 +542,8 @@ Building the program with BLAS support may lead to some performance improvements
  - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU):
    ```bash
    set PATH=%HIP_PATH%\bin;%PATH%
-   mkdir build
-   cd build
-   cmake -G Ninja -DAMDGPU_TARGETS=gfx1100 -DLLAMA_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release ..
-   cmake --build .
+   cmake -S . -B build -G Ninja -DAMDGPU_TARGETS=gfx1100 -DLLAMA_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release
+   cmake --build build
    ```
   Make sure that `AMDGPU_TARGETS` is set to the GPU arch you want to compile for. The above example uses `gfx1100` that corresponds to Radeon RX 7900XTX/XT/GRE. You can find a list of targets [here](https://llvm.org/docs/AMDGPUUsage.html#processors)
   Find your gpu version string by matching the most significant version information from `rocminfo | grep gfx | head -1 | awk '{print $2}'` with the list of processors, e.g. `gfx1035` maps to `gfx1030`.
@@ -710,9 +709,13 @@ Building the program with BLAS support may lead to some performance improvements

 ### Prepare and Quantize

+> [!NOTE]
+> You can use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to quantise your model weights without any setup too. It is synced from `llama.cpp` main every 6 hours.
+
 To obtain the official LLaMA 2 weights please see the <a href="#obtaining-and-using-the-facebook-llama-2-model">Obtaining and using the Facebook LLaMA 2 model</a> section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face.

-Note: `convert.py` does not support LLaMA 3, you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face.
+Note: `convert.py` has been moved to `examples/convert-legacy-llama.py` and shouldn't be used for anything other than `Llama/Llama2/Mistral` models and their derievatives.
+It does not support LLaMA 3, you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face.

 ```bash
 # obtain the official LLaMA model weights and place them in ./models
@@ -729,10 +732,10 @@ ls ./models
 python3 -m pip install -r requirements.txt

 # convert the model to ggml FP16 format
-python3 convert.py models/mymodel/
+python3 convert-hf-to-gguf.py models/mymodel/

 # [Optional] for models using BPE tokenizers
-python convert.py models/mymodel/ --vocab-type bpe
+python convert-hf-to-gguf.py models/mymodel/ --vocab-type bpe

 # quantize the model to 4-bits (using Q4_K_M method)
 ./quantize ./models/mymodel/ggml-model-f16.gguf ./models/mymodel/ggml-model-Q4_K_M.gguf Q4_K_M
build.zig (172 changed lines; file deleted)

@@ -1,172 +0,0 @@
-// Compatible with Zig Version 0.11.0
-const std = @import("std");
-const ArrayList = std.ArrayList;
-const Compile = std.Build.Step.Compile;
-const ConfigHeader = std.Build.Step.ConfigHeader;
-const Mode = std.builtin.Mode;
-const CrossTarget = std.zig.CrossTarget;
-
-const Maker = struct {
-    builder: *std.build.Builder,
-    target: CrossTarget,
-    optimize: Mode,
-    enable_lto: bool,
-
-    include_dirs: ArrayList([]const u8),
-    cflags: ArrayList([]const u8),
-    cxxflags: ArrayList([]const u8),
-    objs: ArrayList(*Compile),
-
-    fn addInclude(m: *Maker, dir: []const u8) !void {
-        try m.include_dirs.append(dir);
-    }
-    fn addProjectInclude(m: *Maker, path: []const []const u8) !void {
-        try m.addInclude(try m.builder.build_root.join(m.builder.allocator, path));
-    }
-    fn addCFlag(m: *Maker, flag: []const u8) !void {
-        try m.cflags.append(flag);
-    }
-    fn addCxxFlag(m: *Maker, flag: []const u8) !void {
-        try m.cxxflags.append(flag);
-    }
-    fn addFlag(m: *Maker, flag: []const u8) !void {
-        try m.addCFlag(flag);
-        try m.addCxxFlag(flag);
-    }
-
-    fn init(builder: *std.build.Builder) !Maker {
-        const target = builder.standardTargetOptions(.{});
-        const zig_version = @import("builtin").zig_version_string;
-        const commit_hash = try std.ChildProcess.exec(
-            .{ .allocator = builder.allocator, .argv = &.{ "git", "rev-parse", "HEAD" } },
-        );
-        try std.fs.cwd().writeFile("common/build-info.cpp", builder.fmt(
-            \\int LLAMA_BUILD_NUMBER = {};
-            \\char const *LLAMA_COMMIT = "{s}";
-            \\char const *LLAMA_COMPILER = "Zig {s}";
-            \\char const *LLAMA_BUILD_TARGET = "{s}";
-            \\
-        , .{ 0, commit_hash.stdout[0 .. commit_hash.stdout.len - 1], zig_version, try target.allocDescription(builder.allocator) }));
-        var m = Maker{
-            .builder = builder,
-            .target = target,
-            .optimize = builder.standardOptimizeOption(.{}),
-            .enable_lto = false,
-            .include_dirs = ArrayList([]const u8).init(builder.allocator),
-            .cflags = ArrayList([]const u8).init(builder.allocator),
-            .cxxflags = ArrayList([]const u8).init(builder.allocator),
-            .objs = ArrayList(*Compile).init(builder.allocator),
-        };
-
-        try m.addCFlag("-std=c11");
-        try m.addCxxFlag("-std=c++11");
-        try m.addProjectInclude(&.{});
-        try m.addProjectInclude(&.{"common"});
-        return m;
-    }
-
-    fn obj(m: *const Maker, name: []const u8, src: []const u8) *Compile {
-        const o = m.builder.addObject(.{ .name = name, .target = m.target, .optimize = m.optimize });
-        if (o.target.getAbi() != .msvc)
-            o.defineCMacro("_GNU_SOURCE", null);
-
-        if (std.mem.endsWith(u8, src, ".c")) {
-            o.addCSourceFiles(&.{src}, m.cflags.items);
-            o.linkLibC();
-        } else {
-            o.addCSourceFiles(&.{src}, m.cxxflags.items);
-            if (o.target.getAbi() == .msvc) {
-                o.linkLibC(); // need winsdk + crt
-            } else {
-                // linkLibCpp already add (libc++ + libunwind + libc)
-                o.linkLibCpp();
-            }
-        }
-        for (m.include_dirs.items) |i| o.addIncludePath(.{ .path = i });
-        o.want_lto = m.enable_lto;
-        return o;
-    }
-
-    fn exe(m: *const Maker, name: []const u8, src: []const u8, deps: []const *Compile) *Compile {
-        const e = m.builder.addExecutable(.{ .name = name, .target = m.target, .optimize = m.optimize });
-        e.addCSourceFiles(&.{src}, m.cxxflags.items);
-        for (deps) |d| e.addObject(d);
-        for (m.objs.items) |o| e.addObject(o);
-        for (m.include_dirs.items) |i| e.addIncludePath(.{ .path = i });
-
-        // https://github.com/ziglang/zig/issues/15448
-        if (e.target.getAbi() == .msvc) {
-            e.linkLibC(); // need winsdk + crt
-        } else {
-            // linkLibCpp already add (libc++ + libunwind + libc)
-            e.linkLibCpp();
-        }
-        m.builder.installArtifact(e);
-        e.want_lto = m.enable_lto;
-        return e;
-    }
-};
-
-pub fn build(b: *std.build.Builder) !void {
-    var make = try Maker.init(b);
-    make.enable_lto = b.option(bool, "lto", "Enable LTO optimization, (default: false)") orelse false;
-
-    const ggml = make.obj("ggml", "ggml.c");
-    const sgemm = make.obj("sgemm", "sgemm.cpp");
-    const ggml_alloc = make.obj("ggml-alloc", "ggml-alloc.c");
-    const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
-    const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
-    const unicode = make.obj("unicode", "unicode.cpp");
-    const unicode_data = make.obj("unicode-data", "unicode-data.cpp");
-    const llama = make.obj("llama", "llama.cpp");
-    const buildinfo = make.obj("common", "common/build-info.cpp");
-    const common = make.obj("common", "common/common.cpp");
-    const console = make.obj("console", "common/console.cpp");
-    const sampling = make.obj("sampling", "common/sampling.cpp");
-    const grammar_parser = make.obj("grammar-parser", "common/grammar-parser.cpp");
-    const json_schema_to_grammar = make.obj("json-schema-to-grammar", "common/json-schema-to-grammar.cpp");
-    const train = make.obj("train", "common/train.cpp");
-    const clip = make.obj("clip", "examples/llava/clip.cpp");
-    const llava = make.obj("llava", "examples/llava/llava.cpp");
-
-    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, sgemm, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, json_schema_to_grammar, buildinfo, sampling, console, grammar_parser });
-    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, sgemm, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, json_schema_to_grammar, buildinfo });
-    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, sgemm, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, json_schema_to_grammar, buildinfo });
-    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, sgemm, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, json_schema_to_grammar, buildinfo });
-    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, sgemm, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, json_schema_to_grammar, buildinfo, train });
-    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, sgemm, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, json_schema_to_grammar, buildinfo, train });
-
-    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, sgemm, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, json_schema_to_grammar, buildinfo, sampling, grammar_parser, clip, llava });
-    if (server.target.isWindows()) {
-        server.linkSystemLibrary("ws2_32");
-    }
-
-    const server_assets = [_][]const u8{ "index.html", "index.js", "completion.js", "json-schema-to-grammar.mjs" };
-    for (server_assets) |asset| {
-        const input_path = b.fmt("examples/server/public/{s}", .{asset});
-        const output_path = b.fmt("examples/server/{s}.hpp", .{asset});
-
-        // Portable equivalent of `b.addSystemCommand(&.{ "xxd", "-n", asset, "-i", input_path, output_path }) })`:
-
-        const input = try std.fs.cwd().readFileAlloc(b.allocator, input_path, std.math.maxInt(usize));
-        defer b.allocator.free(input);
-
-        var buf = std.ArrayList(u8).init(b.allocator);
-        defer buf.deinit();
-
-        for (input) |byte| {
-            try std.fmt.format(buf.writer(), "0x{X:0>2}, ", .{byte});
-        }
-
-        var name = try std.mem.replaceOwned(u8, b.allocator, asset, "-", "_");
-        defer b.allocator.free(name);
-        std.mem.replaceScalar(u8, name, '.', '_');
-
-        try std.fs.cwd().writeFile(output_path, b.fmt(
-            "unsigned char {s}[] = {{{s}}};\nunsigned int {s}_len = {d};\n",
-            .{ name, buf.items, name, input.len },
-        ));
-
-        std.debug.print("Dumped hex of \"{s}\" ({s}) to {s}\n", .{ input_path, name, output_path });
-    }
-}
518
ci/run.sh
518
ci/run.sh
|
@ -202,12 +202,15 @@ function gg_sum_test_scripts_release {
|
||||||
}
|
}
|
||||||
|
|
||||||
function gg_get_model {
|
function gg_get_model {
|
||||||
local gguf_3b="$MNT/models/open-llama/3B-v2/ggml-model-f16.gguf"
|
local gguf_0="$MNT/models/pythia/1.4B/ggml-model-f16.gguf"
|
||||||
local gguf_7b="$MNT/models/open-llama/7B-v2/ggml-model-f16.gguf"
|
local gguf_1="$MNT/models/pythia/2.8B/ggml-model-f16.gguf"
|
||||||
if [[ -s $gguf_3b ]]; then
|
local gguf_2="$MNT/models/open-llama/7B-v2/ggml-model-f16.gguf"
|
||||||
echo -n "$gguf_3b"
|
if [[ -s $gguf_0 ]]; then
|
||||||
elif [[ -s $gguf_7b ]]; then
|
echo -n "$gguf_0"
|
||||||
echo -n "$gguf_7b"
|
elif [[ -s $gguf_1 ]]; then
|
||||||
|
echo -n "$gguf_1"
|
||||||
|
elif [[ -s $gguf_2 ]]; then
|
||||||
|
echo -n "$gguf_2"
|
||||||
else
|
else
|
||||||
echo >&2 "No model found. Can't run gg_run_ctest_with_model."
|
echo >&2 "No model found. Can't run gg_run_ctest_with_model."
|
||||||
exit 1
|
exit 1
|
||||||
|
@@ -256,186 +259,6 @@ function gg_sum_ctest_with_model_release {
     gg_printf '```\n'
 }

-# open_llama_3b_v2
-
-function gg_run_open_llama_3b_v2 {
-    cd ${SRC}
-
-    gg_wget models-mnt/open-llama/3B-v2/ https://huggingface.co/openlm-research/open_llama_3b_v2/raw/main/config.json
-    gg_wget models-mnt/open-llama/3B-v2/ https://huggingface.co/openlm-research/open_llama_3b_v2/resolve/main/tokenizer.model
-    gg_wget models-mnt/open-llama/3B-v2/ https://huggingface.co/openlm-research/open_llama_3b_v2/raw/main/tokenizer_config.json
-    gg_wget models-mnt/open-llama/3B-v2/ https://huggingface.co/openlm-research/open_llama_3b_v2/raw/main/special_tokens_map.json
-    gg_wget models-mnt/open-llama/3B-v2/ https://huggingface.co/openlm-research/open_llama_3b_v2/resolve/main/pytorch_model.bin
-    gg_wget models-mnt/open-llama/3B-v2/ https://huggingface.co/openlm-research/open_llama_3b_v2/raw/main/generation_config.json
-
-    gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip
-    unzip -o models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/
-    head -n 60 models-mnt/wikitext/wikitext-2-raw/wiki.test.raw > models-mnt/wikitext/wikitext-2-raw/wiki.test-60.raw
-
-    path_models="../models-mnt/open-llama/3B-v2"
-    path_wiki="../models-mnt/wikitext/wikitext-2-raw"
-
-    rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release
-
-    set -e
-
-    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_QKK_64=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
-    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
-
-    python3 ../convert.py ${path_models}
-
-    model_f16="${path_models}/ggml-model-f16.gguf"
-    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
-    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
-    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
-    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
-    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
-    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
-    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
-    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
-    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
-    model_q6_k="${path_models}/ggml-model-q6_k.gguf"
-
-    wiki_test_60="${path_wiki}/wiki.test-60.raw"
-
-    ./bin/quantize ${model_f16} ${model_q8_0} q8_0
-    ./bin/quantize ${model_f16} ${model_q4_0} q4_0
-    ./bin/quantize ${model_f16} ${model_q4_1} q4_1
-    ./bin/quantize ${model_f16} ${model_q5_0} q5_0
-    ./bin/quantize ${model_f16} ${model_q5_1} q5_1
-    ./bin/quantize ${model_f16} ${model_q2_k} q2_k
-    ./bin/quantize ${model_f16} ${model_q3_k} q3_k
-    ./bin/quantize ${model_f16} ${model_q4_k} q4_k
-    ./bin/quantize ${model_f16} ${model_q5_k} q5_k
-    ./bin/quantize ${model_f16} ${model_q6_k} q6_k
-
-    (time ./bin/main --model ${model_f16} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
-    (time ./bin/main --model ${model_q8_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
-    (time ./bin/main --model ${model_q4_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
-    (time ./bin/main --model ${model_q4_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
-    (time ./bin/main --model ${model_q5_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
-    (time ./bin/main --model ${model_q5_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
-    (time ./bin/main --model ${model_q2_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
-    (time ./bin/main --model ${model_q3_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
-    (time ./bin/main --model ${model_q4_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
-    (time ./bin/main --model ${model_q5_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
-    (time ./bin/main --model ${model_q6_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log
-
-    (time ./bin/perplexity --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
-    (time ./bin/perplexity --model ${model_q8_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
-    (time ./bin/perplexity --model ${model_q4_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
-    (time ./bin/perplexity --model ${model_q4_1} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
-    (time ./bin/perplexity --model ${model_q5_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
-    (time ./bin/perplexity --model ${model_q5_1} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
-    (time ./bin/perplexity --model ${model_q2_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
-    (time ./bin/perplexity --model ${model_q3_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
-    (time ./bin/perplexity --model ${model_q4_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
-    (time ./bin/perplexity --model ${model_q5_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
-    (time ./bin/perplexity --model ${model_q6_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log
-
-    (time ./bin/imatrix --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
-
-    (time ./bin/save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/save-load-state -fa --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-
-    function check_ppl {
-        qnt="$1"
-        ppl=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-
-        if [ $(echo "$ppl > 20.0" | bc) -eq 1 ]; then
-            printf ' - %s @ %s (FAIL: ppl > 20.0)\n' "$qnt" "$ppl"
-            return 20
-        fi
-
-        printf ' - %s @ %s OK\n' "$qnt" "$ppl"
-        return 0
-    }
-
-    check_ppl "f16" "$(cat $OUT/${ci}-tg-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q8_0" "$(cat $OUT/${ci}-tg-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q4_0" "$(cat $OUT/${ci}-tg-q4_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q4_1" "$(cat $OUT/${ci}-tg-q4_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q5_0" "$(cat $OUT/${ci}-tg-q5_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q5_1" "$(cat $OUT/${ci}-tg-q5_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q2_k" "$(cat $OUT/${ci}-tg-q2_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q3_k" "$(cat $OUT/${ci}-tg-q3_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q4_k" "$(cat $OUT/${ci}-tg-q4_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-    check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
-
-    cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log
-
-    # lora
-    function compare_ppl {
-        qnt="$1"
-        ppl1=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-        ppl2=$(echo "$3" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-
-        if [ $(echo "$ppl1 < $ppl2" | bc) -eq 1 ]; then
-            printf ' - %s @ %s (FAIL: %s > %s)\n' "$qnt" "$ppl" "$ppl1" "$ppl2"
-            return 20
-        fi
-
-        printf ' - %s @ %s %s OK\n' "$qnt" "$ppl1" "$ppl2"
-        return 0
-    }
-
-    path_lora="../models-mnt/open-llama/3B-v2/lora"
-    path_shakespeare="../models-mnt/shakespeare"
-
-    shakespeare="${path_shakespeare}/shakespeare.txt"
-    lora_shakespeare="${path_lora}/ggml-adapter-model.bin"
-
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_3b_v2_shakespeare_lora/resolve/main/adapter_config.json
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_3b_v2_shakespeare_lora/resolve/main/adapter_model.bin
-    gg_wget ${path_shakespeare} https://huggingface.co/slaren/open_llama_3b_v2_shakespeare_lora/resolve/main/shakespeare.txt
-
-    python3 ../convert-lora-to-ggml.py ${path_lora}
-
-    # f16
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-f16.log
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} --lora ${lora_shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-f16.log
-    compare_ppl "f16 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-f16.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # q8_0
-    (time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-q8_0.log
-    (time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0.log
-    compare_ppl "q8_0 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # q8_0 + f16 lora-base
-    (time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} --lora-base ${model_f16} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log
-    compare_ppl "q8_0 / f16 base shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    set +e
-}
-
-function gg_sum_open_llama_3b_v2 {
-    gg_printf '### %s\n\n' "${ci}"
-
-    gg_printf 'OpenLLaMA 3B-v2:\n'
-    gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
-    gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)"
-    gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)"
-    gg_printf '- lora:\n%s\n' "$(cat $OUT/${ci}-lora-ppl.log)"
-    gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)"
-    gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)"
-    gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)"
-    gg_printf '- q4_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_1.log)"
-    gg_printf '- q5_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_0.log)"
-    gg_printf '- q5_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_1.log)"
-    gg_printf '- q2_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q2_k.log)"
-    gg_printf '- q3_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q3_k.log)"
-    gg_printf '- q4_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_k.log)"
-    gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)"
-    gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)"
-    gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)"
-    gg_printf '- shakespeare (f16):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-f16.log)"
-    gg_printf '- shakespeare (f16 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log)"
-    gg_printf '- shakespeare (q8_0):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log)"
-    gg_printf '- shakespeare (q8_0 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log)"
-    gg_printf '- shakespeare (q8_0 / f16 base lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log)"
-}
-
 # open_llama_7b_v2
 # requires: GG_BUILD_CUDA

@@ -464,7 +287,7 @@ function gg_run_open_llama_7b_v2 {
     (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
     (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

-    python3 ../convert.py ${path_models}
+    python3 ../examples/convert-legacy-llama.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

     model_f16="${path_models}/ggml-model-f16.gguf"
     model_q8_0="${path_models}/ggml-model-q8_0.gguf"
@@ -549,48 +372,6 @@ function gg_run_open_llama_7b_v2 {

     cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log

-    # lora
-    function compare_ppl {
-        qnt="$1"
-        ppl1=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-        ppl2=$(echo "$3" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-
-        if [ $(echo "$ppl1 < $ppl2" | bc) -eq 1 ]; then
-            printf ' - %s @ %s (FAIL: %s > %s)\n' "$qnt" "$ppl" "$ppl1" "$ppl2"
-            return 20
-        fi
-
-        printf ' - %s @ %s %s OK\n' "$qnt" "$ppl1" "$ppl2"
-        return 0
-    }
-
-    path_lora="../models-mnt/open-llama/7B-v2/lora"
-    path_shakespeare="../models-mnt/shakespeare"
-
-    shakespeare="${path_shakespeare}/shakespeare.txt"
-    lora_shakespeare="${path_lora}/ggml-adapter-model.bin"
-
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/adapter_config.json
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/adapter_model.bin
-    gg_wget ${path_shakespeare} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/shakespeare.txt
-
-    python3 ../convert-lora-to-ggml.py ${path_lora}
-
-    # f16
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-f16.log
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} --lora ${lora_shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-f16.log
-    compare_ppl "f16 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-f16.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # currently not supported by the CUDA backend
-    # q8_0
-    #(time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-q8_0.log
-    #(time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0.log
-    #compare_ppl "q8_0 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # q8_0 + f16 lora-base
-    #(time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} --lora-base ${model_f16} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log
-    #compare_ppl "q8_0 / f16 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
     set +e
 }

@@ -601,7 +382,6 @@ function gg_sum_open_llama_7b_v2 {
     gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
     gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)"
     gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)"
-    gg_printf '- lora:\n%s\n' "$(cat $OUT/${ci}-lora-ppl.log)"
     gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)"
     gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)"
     gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)"
@@ -614,11 +394,272 @@ function gg_sum_open_llama_7b_v2 {
     gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)"
     gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)"
     gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)"
-    gg_printf '- shakespeare (f16):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-f16.log)"
-    gg_printf '- shakespeare (f16 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log)"
-    #gg_printf '- shakespeare (q8_0):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log)"
-    #gg_printf '- shakespeare (q8_0 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log)"
-    #gg_printf '- shakespeare (q8_0 / f16 base lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log)"
+}
+
+# pythia_1.4b
+
+function gg_run_pythia_1_4b {
+    cd ${SRC}
+
+    gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/config.json
+    gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/tokenizer.json
+    gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/tokenizer_config.json
+    gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/special_tokens_map.json
+    gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/resolve/main/pytorch_model.bin
+
+    gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip
+    unzip -o models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/
+    head -n 60 models-mnt/wikitext/wikitext-2-raw/wiki.test.raw > models-mnt/wikitext/wikitext-2-raw/wiki.test-60.raw
+
+    path_models="../models-mnt/pythia/1.4B"
+    path_wiki="../models-mnt/wikitext/wikitext-2-raw"
+
+    rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release
+
+    set -e
+
+    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
+    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
+
+    python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
+
+    model_f16="${path_models}/ggml-model-f16.gguf"
+    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
+    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
+    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
+    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
+    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
+    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
+    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
+    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
+    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
+    model_q6_k="${path_models}/ggml-model-q6_k.gguf"
+
+    wiki_test_60="${path_wiki}/wiki.test-60.raw"
+
+    ./bin/quantize ${model_f16} ${model_q8_0} q8_0
+    ./bin/quantize ${model_f16} ${model_q4_0} q4_0
+    ./bin/quantize ${model_f16} ${model_q4_1} q4_1
+    ./bin/quantize ${model_f16} ${model_q5_0} q5_0
+    ./bin/quantize ${model_f16} ${model_q5_1} q5_1
+    ./bin/quantize ${model_f16} ${model_q2_k} q2_k
+    ./bin/quantize ${model_f16} ${model_q3_k} q3_k
+    ./bin/quantize ${model_f16} ${model_q4_k} q4_k
+    ./bin/quantize ${model_f16} ${model_q5_k} q5_k
+    ./bin/quantize ${model_f16} ${model_q6_k} q6_k
+
+    (time ./bin/main --model ${model_f16} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
+    (time ./bin/main --model ${model_q8_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
+    (time ./bin/main --model ${model_q4_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
+    (time ./bin/main --model ${model_q4_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
+    (time ./bin/main --model ${model_q5_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
+    (time ./bin/main --model ${model_q5_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
+    (time ./bin/main --model ${model_q2_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
+    (time ./bin/main --model ${model_q3_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
+    (time ./bin/main --model ${model_q4_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
+    (time ./bin/main --model ${model_q5_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
+    (time ./bin/main --model ${model_q6_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log
+
+    (time ./bin/perplexity --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
+    (time ./bin/perplexity --model ${model_q8_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
+    (time ./bin/perplexity --model ${model_q4_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
+    (time ./bin/perplexity --model ${model_q4_1} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
+    (time ./bin/perplexity --model ${model_q5_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
+    (time ./bin/perplexity --model ${model_q5_1} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
+    (time ./bin/perplexity --model ${model_q2_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
+    (time ./bin/perplexity --model ${model_q3_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
+    (time ./bin/perplexity --model ${model_q4_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
+    (time ./bin/perplexity --model ${model_q5_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
+    (time ./bin/perplexity --model ${model_q6_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log
+
+    (time ./bin/imatrix --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
+
+    (time ./bin/save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -fa --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+
+    function check_ppl {
+        qnt="$1"
+        ppl=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
+
+        if [ $(echo "$ppl > 20.0" | bc) -eq 1 ]; then
+            printf ' - %s @ %s (FAIL: ppl > 20.0)\n' "$qnt" "$ppl"
+            return 20
+        fi
+
+        printf ' - %s @ %s OK\n' "$qnt" "$ppl"
+        return 0
+    }
+
+    check_ppl "f16" "$(cat $OUT/${ci}-tg-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q8_0" "$(cat $OUT/${ci}-tg-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q4_0" "$(cat $OUT/${ci}-tg-q4_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q4_1" "$(cat $OUT/${ci}-tg-q4_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q5_0" "$(cat $OUT/${ci}-tg-q5_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q5_1" "$(cat $OUT/${ci}-tg-q5_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    #check_ppl "q2_k" "$(cat $OUT/${ci}-tg-q2_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log # note: ppl > 20.0 for this quant and model
+    check_ppl "q3_k" "$(cat $OUT/${ci}-tg-q3_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q4_k" "$(cat $OUT/${ci}-tg-q4_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+
+    cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log
+
+    set +e
+}
+
+function gg_sum_pythia_1_4b {
+    gg_printf '### %s\n\n' "${ci}"
+
+    gg_printf 'Pythia 1.4B:\n'
+    gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
+    gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)"
+    gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)"
+    gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)"
+    gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)"
+    gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)"
+    gg_printf '- q4_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_1.log)"
+    gg_printf '- q5_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_0.log)"
+    gg_printf '- q5_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_1.log)"
+    gg_printf '- q2_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q2_k.log)"
+    gg_printf '- q3_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q3_k.log)"
+    gg_printf '- q4_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_k.log)"
+    gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)"
+    gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)"
+    gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)"
+}
+
+# pythia_2_8b
+# requires: GG_BUILD_CUDA
+
+function gg_run_pythia_2_8b {
+    cd ${SRC}
+
+    gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/config.json
+    gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/tokenizer.json
+    gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/tokenizer_config.json
+    gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/special_tokens_map.json
+    gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/resolve/main/pytorch_model.bin
+
+    gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip
+    unzip -o models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/
+
+    path_models="../models-mnt/pythia/2.8B"
+    path_wiki="../models-mnt/wikitext/wikitext-2-raw"
+
+    rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release
+
+    set -e
+
+    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
+    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
+
+    python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
+
+    model_f16="${path_models}/ggml-model-f16.gguf"
+    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
+    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
+    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
+    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
+    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
+    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
+    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
+    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
+    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
+    model_q6_k="${path_models}/ggml-model-q6_k.gguf"
+
+    wiki_test="${path_wiki}/wiki.test.raw"
+
+    ./bin/quantize ${model_f16} ${model_q8_0} q8_0
+    ./bin/quantize ${model_f16} ${model_q4_0} q4_0
+    ./bin/quantize ${model_f16} ${model_q4_1} q4_1
+    ./bin/quantize ${model_f16} ${model_q5_0} q5_0
+    ./bin/quantize ${model_f16} ${model_q5_1} q5_1
+    ./bin/quantize ${model_f16} ${model_q2_k} q2_k
+    ./bin/quantize ${model_f16} ${model_q3_k} q3_k
+    ./bin/quantize ${model_f16} ${model_q4_k} q4_k
+    ./bin/quantize ${model_f16} ${model_q5_k} q5_k
+    ./bin/quantize ${model_f16} ${model_q6_k} q6_k
+
+    (time ./bin/main --model ${model_f16} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
+    (time ./bin/main --model ${model_q8_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
+    (time ./bin/main --model ${model_q4_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
+    (time ./bin/main --model ${model_q4_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
+    (time ./bin/main --model ${model_q5_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
+    (time ./bin/main --model ${model_q5_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
+    (time ./bin/main --model ${model_q2_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
+    (time ./bin/main --model ${model_q3_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
+    (time ./bin/main --model ${model_q4_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
+    (time ./bin/main --model ${model_q5_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
+    (time ./bin/main --model ${model_q6_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log
+
+    (time ./bin/perplexity --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
+    (time ./bin/perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
+    (time ./bin/perplexity --model ${model_q4_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
+    (time ./bin/perplexity --model ${model_q4_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
+    (time ./bin/perplexity --model ${model_q5_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
+    (time ./bin/perplexity --model ${model_q5_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
+    (time ./bin/perplexity --model ${model_q2_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
+    (time ./bin/perplexity --model ${model_q3_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
+    (time ./bin/perplexity --model ${model_q4_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
+    (time ./bin/perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
+    (time ./bin/perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log
+
+    (time ./bin/imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
+
+    (time ./bin/save-load-state -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -fa -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -fa -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+
+    function check_ppl {
+        qnt="$1"
+        ppl=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
+
+        if [ $(echo "$ppl > 20.0" | bc) -eq 1 ]; then
+            printf ' - %s @ %s (FAIL: ppl > 20.0)\n' "$qnt" "$ppl"
+            return 20
+        fi
+
+        printf ' - %s @ %s OK\n' "$qnt" "$ppl"
+        return 0
+    }
+
+    check_ppl "f16" "$(cat $OUT/${ci}-tg-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q8_0" "$(cat $OUT/${ci}-tg-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q4_0" "$(cat $OUT/${ci}-tg-q4_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q4_1" "$(cat $OUT/${ci}-tg-q4_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q5_0" "$(cat $OUT/${ci}-tg-q5_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q5_1" "$(cat $OUT/${ci}-tg-q5_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    #check_ppl "q2_k" "$(cat $OUT/${ci}-tg-q2_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log # note: ppl > 20.0 for this quant and model
+    check_ppl "q3_k" "$(cat $OUT/${ci}-tg-q3_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q4_k" "$(cat $OUT/${ci}-tg-q4_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+    check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
+
+    cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log
+
+    set +e
+}
+
+function gg_sum_pythia_2_8b {
+    gg_printf '### %s\n\n' "${ci}"
+
+    gg_printf 'Pythia 2.8B:\n'
+    gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
+    gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)"
+    gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)"
+    gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)"
+    gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)"
+    gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)"
+    gg_printf '- q4_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_1.log)"
+    gg_printf '- q5_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_0.log)"
+    gg_printf '- q5_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_1.log)"
+    gg_printf '- q2_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q2_k.log)"
+    gg_printf '- q3_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q3_k.log)"
+    gg_printf '- q4_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_k.log)"
+    gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)"
+    gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)"
+    gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)"
 }

 # bge-small
@@ -647,7 +688,7 @@ function gg_run_embd_bge_small {
     (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
     (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

-    python3 ../convert-hf-to-gguf.py ${path_models}
+    python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

     model_f16="${path_models}/ggml-model-f16.gguf"
     model_q8_0="${path_models}/ggml-model-q8_0.gguf"
@@ -701,9 +742,10 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then

    if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then
        if [ -z ${GG_BUILD_CUDA} ]; then
-           test $ret -eq 0 && gg_run open_llama_3b_v2
+           test $ret -eq 0 && gg_run pythia_1_4b
        else
-           test $ret -eq 0 && gg_run open_llama_7b_v2
+           test $ret -eq 0 && gg_run pythia_2_8b
+           #test $ret -eq 0 && gg_run open_llama_7b_v2
        fi
        test $ret -eq 0 && gg_run ctest_with_model_debug
        test $ret -eq 0 && gg_run ctest_with_model_release
cmake/arm64-windows-llvm.cmake (new file, 16 lines)
@@ -0,0 +1,16 @@
+set( CMAKE_SYSTEM_NAME Windows )
+set( CMAKE_SYSTEM_PROCESSOR arm64 )
+
+set( target arm64-pc-windows-msvc )
+
+set( CMAKE_C_COMPILER clang )
+set( CMAKE_CXX_COMPILER clang++ )
+
+set( CMAKE_C_COMPILER_TARGET ${target} )
+set( CMAKE_CXX_COMPILER_TARGET ${target} )
+
+set( arch_c_flags "-march=armv8.7-a -fvectorize -ffp-model=fast" )
+set( warn_c_flags "-Wno-format -Wno-unused-variable -Wno-unused-function -Wno-gnu-zero-variadic-macro-arguments" )
+
+set( CMAKE_C_FLAGS_INIT "${arch_c_flags} ${warn_c_flags}" )
+set( CMAKE_CXX_FLAGS_INIT "${arch_c_flags} ${warn_c_flags}" )
cmake/arm64-windows-msvc.cmake (new file, 6 lines)
@@ -0,0 +1,6 @@
+set( CMAKE_SYSTEM_NAME Windows )
+set( CMAKE_SYSTEM_PROCESSOR arm64 )
+
+set( target arm64-pc-windows-msvc )
+set( CMAKE_C_COMPILER_TARGET ${target} )
+set( CMAKE_CXX_COMPILER_TARGET ${target} )
common/common.cpp (1346 lines changed; diff suppressed because it is too large)

common/common.h
@@ -35,14 +35,18 @@

 // build info
 extern int LLAMA_BUILD_NUMBER;
-extern char const *LLAMA_COMMIT;
-extern char const *LLAMA_COMPILER;
-extern char const *LLAMA_BUILD_TARGET;
+extern char const * LLAMA_COMMIT;
+extern char const * LLAMA_COMPILER;
+extern char const * LLAMA_BUILD_TARGET;

 struct llama_control_vector_load_info;

-int get_math_cpu_count();
-int32_t get_num_physical_cores();
+//
+// CPU utils
+//
+
+int32_t cpu_get_num_physical_cores();
+int32_t cpu_get_num_math();

 //
 // CLI argument parsing
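To illustrate the renamed CPU helpers, here is a minimal sketch; only the two declarations above come from this diff, the surrounding program is hypothetical:

    #include <cstdio>
    #include "common.h"

    int main() {
        printf("physical cores: %d\n", cpu_get_num_physical_cores());
        printf("default math threads: %d\n", cpu_get_num_math());
        return 0;
    }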
@@ -51,7 +55,7 @@ int32_t get_num_physical_cores();
 struct gpt_params {
     uint32_t seed = LLAMA_DEFAULT_SEED; // RNG seed

-    int32_t n_threads = get_math_cpu_count();
+    int32_t n_threads = cpu_get_num_math();
     int32_t n_threads_draft = -1;
     int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
     int32_t n_threads_batch_draft = -1;
@@ -82,6 +86,7 @@ struct gpt_params {
     float yarn_beta_slow = 1.0f; // YaRN high correction dim
     int32_t yarn_orig_ctx = 0; // YaRN original context length
     float defrag_thold = -1.0f; // KV cache defragmentation threshold
+    std::string rpc_servers = ""; // comma separated list of RPC servers

     ggml_backend_sched_eval_callback cb_eval = nullptr;
     void * cb_eval_user_data = nullptr;
@@ -140,6 +145,8 @@ struct gpt_params {
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
     bool interactive = false; // interactive mode
+    bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
+    bool special = false; // enable special token output
     bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all = false; // save user input and generations to prompt cache
@@ -177,33 +184,34 @@ struct gpt_params {

 void gpt_params_handle_model_default(gpt_params & params);

-bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
-
-bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params);
-
-bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
-
-void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
-
-bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param);
-
-std::string get_system_info(const gpt_params & params);
-
-std::string gpt_random_prompt(std::mt19937 & rng);
-
-void process_escapes(std::string& input);
-
-bool validate_file_name(const std::string & filename);
+bool gpt_params_parse_ex (int argc, char ** argv, gpt_params & params);
+bool gpt_params_parse (int argc, char ** argv, gpt_params & params);
+bool gpt_params_find_arg (int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param);
+void gpt_params_print_usage(int argc, char ** argv, const gpt_params & params);
+
+std::string gpt_params_get_system_info(const gpt_params & params);

 //
 // String utils
 //

-std::vector<llama_sampler_type> sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names);
-std::vector<llama_sampler_type> sampler_types_from_chars(const std::string & names_string);
 std::vector<std::string> string_split(std::string input, char separator);

 std::string string_strip(const std::string & str);
-std::string sampler_type_to_name_string(llama_sampler_type sampler_type);
+std::string string_get_sortable_timestamp();
+std::string string_random_prompt(std::mt19937 & rng);
+
+bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
+void string_process_escapes(std::string & input);
+
+//
+// Filesystem utils
+//
+
+bool fs_validate_filename(const std::string & filename);
+bool fs_create_directory_with_parents(const std::string & path);
+
+std::string fs_get_cache_directory();

 //
 // Model utils
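A hedged sketch of how the renamed argument-parsing entry points fit together; only the declarations above are taken from the diff, the surrounding main() is illustrative:

    #include <cstdio>
    #include "common.h"

    int main(int argc, char ** argv) {
        gpt_params params;
        if (!gpt_params_parse(argc, argv, params)) {
            gpt_params_print_usage(argc, argv, params);
            return 1;
        }
        printf("%s\n", gpt_params_get_system_info(params).c_str());
        return 0;
    }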
@@ -274,29 +282,15 @@ std::string llama_detokenize_bpe(
 // defaults to true when model type is SPM, otherwise false.
 bool llama_should_add_bos_token(const llama_model * model);

-//
-// YAML utils
-//
-
-bool create_directory_with_parents(const std::string & path);
-void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
-void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
-void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);
-std::string get_sortable_timestamp();
-
-void dump_non_result_info_yaml(
-    FILE * stream, const gpt_params & params, const llama_context * lctx,
-    const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
-
 //
 // KV cache utils
 //

 // Dump the KV cache view with the number of sequences per cell.
-void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80);
+void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);

 // Dump the KV cache view showing individual sequences in each cell (long output).
-void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
+void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);

 //
 // Embedding utils
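A sketch of a call site for the renamed KV-cache dump helpers. It assumes the llama_kv_cache_view_init/update/free helpers declared in llama.h and an already-created llama_context * ctx; none of that scaffolding is part of this diff:

    llama_kv_cache_view view = llama_kv_cache_view_init(ctx, 1); // 1 = max sequences tracked per cell
    llama_kv_cache_view_update(ctx, &view);
    llama_kv_cache_dump_view(view);          // compact dump, default row_size = 80
    llama_kv_cache_dump_view_seqs(view, 40); // long form, one entry per sequence
    llama_kv_cache_view_free(&view);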
@@ -330,6 +324,20 @@ llama_control_vector_data llama_control_vector_load(const std::vector<llama_cont
 //
 // Split utils
 //

 static const char * const LLM_KV_SPLIT_NO = "split.no";
 static const char * const LLM_KV_SPLIT_COUNT = "split.count";
 static const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
+
+//
+// YAML utils
+//
+
+void yaml_dump_vector_float (FILE * stream, const char * prop_name, const std::vector<float> & data);
+void yaml_dump_vector_int (FILE * stream, const char * prop_name, const std::vector<int> & data);
+void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const char * data);
+
+void yaml_dump_non_result_info(
+    FILE * stream, const gpt_params & params, const llama_context * lctx,
+    const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
common/grammar-parser.cpp

@@ -26,7 +26,7 @@ namespace grammar_parser {

    static uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) {
        uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
-       auto result = state.symbol_ids.insert(std::make_pair(std::string(src, len), next_id));
+       auto result = state.symbol_ids.emplace(std::string(src, len), next_id);
        return result.first->second;
    }

@@ -142,6 +142,9 @@ namespace grammar_parser {
            pos++;
            last_sym_start = out_elements.size();
            while (*pos != '"') {
+               if (!*pos) {
+                   throw std::runtime_error("unexpected end of input");
+               }
                auto char_pair = parse_char(pos);
                pos = char_pair.second;
                out_elements.push_back({LLAMA_GRETYPE_CHAR, char_pair.first});
@@ -156,6 +159,9 @@ namespace grammar_parser {
            }
            last_sym_start = out_elements.size();
            while (*pos != ']') {
+               if (!*pos) {
+                   throw std::runtime_error("unexpected end of input");
+               }
                auto char_pair = parse_char(pos);
                pos = char_pair.second;
                enum llama_gretype type = last_sym_start < out_elements.size()
@@ -164,6 +170,9 @@ namespace grammar_parser {

                out_elements.push_back({type, char_pair.first});
                if (pos[0] == '-' && pos[1] != ']') {
+                   if (!pos[1]) {
+                       throw std::runtime_error("unexpected end of input");
+                   }
                    auto endchar_pair = parse_char(pos + 1);
                    pos = endchar_pair.second;
                    out_elements.push_back({LLAMA_GRETYPE_CHAR_RNG_UPPER, endchar_pair.first});
common/json-schema-to-grammar.cpp

@@ -272,7 +272,7 @@ private:
        if (literal.empty()) {
            return false;
        }
-       ret.push_back(std::make_pair(literal, true));
+       ret.emplace_back(literal, true);
        literal.clear();
        return true;
    };
@@ -298,7 +298,7 @@ private:
        while (i < length) {
            char c = sub_pattern[i];
            if (c == '.') {
-               seq.push_back(std::make_pair(get_dot(), false));
+               seq.emplace_back(get_dot(), false);
                i++;
            } else if (c == '(') {
                i++;
@@ -307,7 +307,7 @@ private:
                    _warnings.push_back("Unsupported pattern syntax");
                }
            }
-           seq.push_back(std::make_pair("(" + to_rule(transform()) + ")", false));
+           seq.emplace_back("(" + to_rule(transform()) + ")", false);
        } else if (c == ')') {
            i++;
            if (start > 0 && sub_pattern[start - 1] != '(') {
@@ -331,9 +331,9 @@ private:
            }
            square_brackets += ']';
            i++;
-           seq.push_back(std::make_pair(square_brackets, false));
+           seq.emplace_back(square_brackets, false);
        } else if (c == '|') {
-           seq.push_back(std::make_pair("|", false));
+           seq.emplace_back("|", false);
            i++;
        } else if (c == '*' || c == '+' || c == '?') {
            seq.back() = std::make_pair(to_rule(seq.back()) + c, false);
@@ -417,7 +417,7 @@ private:
            }
        }
        if (!literal.empty()) {
-           seq.push_back(std::make_pair(literal, true));
+           seq.emplace_back(literal, true);
        }
    }
 }
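The change above is mechanical: emplace_back constructs the element in place instead of building a temporary pair with std::make_pair and moving it into the vector. A small self-contained illustration (not from the repository):

    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        std::vector<std::pair<std::string, bool>> seq;
        seq.push_back(std::make_pair(std::string("literal"), true)); // old style: temporary pair, then move
        seq.emplace_back("literal", true);                           // new style: constructed in place
        return 0;
    }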
common/log.h (10 lines changed)
@@ -211,7 +211,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 #define LOG_FLF_VAL , __FILE__, __LINE__, __FUNCTION__
 #else
 #define LOG_FLF_FMT "[%24s:%5ld][%24s] "
-#define LOG_FLF_VAL , __FILE__, __LINE__, __FUNCTION__
+#define LOG_FLF_VAL , __FILE__, (long)__LINE__, __FUNCTION__
 #endif
 #else
 #define LOG_FLF_FMT "%s"
@@ -224,7 +224,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 #define LOG_TEE_FLF_VAL , __FILE__, __LINE__, __FUNCTION__
 #else
 #define LOG_TEE_FLF_FMT "[%24s:%5ld][%24s] "
-#define LOG_TEE_FLF_VAL , __FILE__, __LINE__, __FUNCTION__
+#define LOG_TEE_FLF_VAL , __FILE__, (long)__LINE__, __FUNCTION__
 #endif
 #else
 #define LOG_TEE_FLF_FMT "%s"
@@ -294,7 +294,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 // Main LOG macro.
 // behaves like printf, and supports arguments the exact same way.
 //
-#ifndef _MSC_VER
+#if !defined(_MSC_VER) || defined(__clang__)
 #define LOG(...) LOG_IMPL(__VA_ARGS__, "")
 #else
 #define LOG(str, ...) LOG_IMPL("%s" str, "", ##__VA_ARGS__, "")
@@ -308,14 +308,14 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 // Secondary target can be changed just like LOG_TARGET
 // by defining LOG_TEE_TARGET
 //
-#ifndef _MSC_VER
+#if !defined(_MSC_VER) || defined(__clang__)
 #define LOG_TEE(...) LOG_TEE_IMPL(__VA_ARGS__, "")
 #else
 #define LOG_TEE(str, ...) LOG_TEE_IMPL("%s" str, "", ##__VA_ARGS__, "")
 #endif

 // LOG macro variants with auto endline.
-#ifndef _MSC_VER
+#if !defined(_MSC_VER) || defined(__clang__)
 #define LOGLN(...) LOG_IMPL(__VA_ARGS__, "\n")
 #define LOG_TEELN(...) LOG_TEE_IMPL(__VA_ARGS__, "\n")
 #else
@@ -35,7 +35,7 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_

    result->prev.resize(params.n_prev);

-   result->n_considered = 0;
+   result->n_valid = 0;

    llama_sampling_set_rng_seed(result, params.seed);

@@ -66,7 +66,7 @@ void llama_sampling_reset(llama_sampling_context * ctx) {

    std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
    ctx->cur.clear();
-   ctx->n_considered = 0;
+   ctx->n_valid = 0;
}

void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {
@@ -125,7 +125,7 @@ std::string llama_sampling_order_print(const llama_sampling_params & params) {
    std::string result = "CFG -> Penalties ";
    if (params.mirostat == 0) {
        for (auto sampler_type : params.samplers_sequence) {
-           const auto sampler_type_name = sampler_type_to_name_string(sampler_type);
+           const auto sampler_type_name = llama_sampling_type_to_str(sampler_type);
            if (!sampler_type_name.empty()) {
                result += "-> " + sampler_type_name + " ";
            }
@@ -137,6 +137,87 @@ std::string llama_sampling_order_print(const llama_sampling_params & params) {
    return result;
}

+std::string llama_sampling_type_to_str(llama_sampler_type sampler_type) {
+    switch (sampler_type) {
+        case llama_sampler_type::TOP_K:       return "top_k";
+        case llama_sampler_type::TFS_Z:       return "tfs_z";
+        case llama_sampler_type::TYPICAL_P:   return "typical_p";
+        case llama_sampler_type::TOP_P:       return "top_p";
+        case llama_sampler_type::MIN_P:       return "min_p";
+        case llama_sampler_type::TEMPERATURE: return "temperature";
+        default : return "";
+    }
+}
+
+std::vector<llama_sampler_type> llama_sampling_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
+    std::unordered_map<std::string, llama_sampler_type> sampler_canonical_name_map {
+        {"top_k",       llama_sampler_type::TOP_K},
+        {"top_p",       llama_sampler_type::TOP_P},
+        {"typical_p",   llama_sampler_type::TYPICAL_P},
+        {"min_p",       llama_sampler_type::MIN_P},
+        {"tfs_z",       llama_sampler_type::TFS_Z},
+        {"temperature", llama_sampler_type::TEMPERATURE}
+    };
+
+    // since samplers names are written multiple ways
+    // make it ready for both system names and input names
+    std::unordered_map<std::string, llama_sampler_type> sampler_alt_name_map {
+        {"top-k",     llama_sampler_type::TOP_K},
+        {"top-p",     llama_sampler_type::TOP_P},
+        {"nucleus",   llama_sampler_type::TOP_P},
+        {"typical-p", llama_sampler_type::TYPICAL_P},
+        {"typical",   llama_sampler_type::TYPICAL_P},
+        {"min-p",     llama_sampler_type::MIN_P},
+        {"tfs-z",     llama_sampler_type::TFS_Z},
+        {"tfs",       llama_sampler_type::TFS_Z},
+        {"temp",      llama_sampler_type::TEMPERATURE}
+    };
+
+    std::vector<llama_sampler_type> sampler_types;
+    sampler_types.reserve(names.size());
+    for (const auto & name : names)
+    {
+        auto sampler_item = sampler_canonical_name_map.find(name);
+        if (sampler_item != sampler_canonical_name_map.end())
+        {
+            sampler_types.push_back(sampler_item->second);
+        }
+        else
+        {
+            if (allow_alt_names)
+            {
+                sampler_item = sampler_alt_name_map.find(name);
+                if (sampler_item != sampler_alt_name_map.end())
+                {
+                    sampler_types.push_back(sampler_item->second);
+                }
+            }
+        }
+    }
+    return sampler_types;
+}
+
+std::vector<llama_sampler_type> llama_sampling_types_from_chars(const std::string & names_string) {
+    std::unordered_map<char, llama_sampler_type> sampler_name_map {
+        {'k', llama_sampler_type::TOP_K},
+        {'p', llama_sampler_type::TOP_P},
+        {'y', llama_sampler_type::TYPICAL_P},
+        {'m', llama_sampler_type::MIN_P},
+        {'f', llama_sampler_type::TFS_Z},
+        {'t', llama_sampler_type::TEMPERATURE}
+    };
+
+    std::vector<llama_sampler_type> sampler_types;
+    sampler_types.reserve(names_string.size());
+    for (const auto & c : names_string) {
+        const auto sampler_item = sampler_name_map.find(c);
+        if (sampler_item != sampler_name_map.end()) {
+            sampler_types.push_back(sampler_item->second);
+        }
+    }
+    return sampler_types;
+}
+
// no reasons to expose this function in header
static void sampler_queue(
          struct llama_context * ctx_main,
@@ -179,7 +260,7 @@ static llama_token llama_sampling_sample_impl(
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx,
-                 bool is_resampling) {  // Add a parameter to indicate if we are resampling
+                 bool is_resampling) {
    const llama_sampling_params & params = ctx_sampling->params;

    const float   temp            = params.temp;
@@ -188,8 +269,8 @@ static llama_token llama_sampling_sample_impl(
    const float   mirostat_eta    = params.mirostat_eta;

    std::vector<float> original_logits;
-   auto cur_p = llama_sampling_prepare(ctx_sampling, ctx_main, ctx_cfg, idx, !is_resampling, &original_logits);
-   if (!is_resampling) {
+   auto cur_p = llama_sampling_prepare(ctx_sampling, ctx_main, ctx_cfg, idx, /* apply_grammar= */ is_resampling, &original_logits);
+   if (ctx_sampling->grammar != NULL && !is_resampling) {
        GGML_ASSERT(!original_logits.empty());
    }
    llama_token id = 0;
@@ -252,11 +333,11 @@ static llama_token llama_sampling_sample_impl(
            // Restore logits from the copy
            std::copy(original_logits.begin(), original_logits.end(), logits);

-           return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, true);  // Pass true for is_resampling
+           return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, /* is_resampling= */ true);
        }
    }

-   ctx_sampling->n_considered = cur_p.size;
+   ctx_sampling->n_valid = temp == 0.0f ? 0 : cur_p.size;

    return id;
}
@@ -285,7 +366,8 @@ static llama_token_data_array llama_sampling_prepare_impl(
    // Get a pointer to the logits
    float * logits = llama_get_logits_ith(ctx_main, idx);

-   if (apply_grammar && original_logits != NULL) {
+   if (ctx_sampling->grammar != NULL && !apply_grammar) {
+       GGML_ASSERT(original_logits != NULL);
        // Only make a copy of the original logits if we are not applying grammar checks, not sure if I actually have to do this.
        *original_logits = {logits, logits + llama_n_vocab(llama_get_model(ctx_main))};
    }
@@ -342,7 +424,7 @@ llama_token llama_sampling_sample(
                  struct llama_context * ctx_cfg,
                  const int idx) {
    // Call the implementation function with is_resampling set to false by default
-   return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, false);
+   return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, /* is_resampling= */ false);
}

llama_token_data_array llama_sampling_prepare(
@@ -81,7 +81,7 @@ struct llama_sampling_context {
    // TODO: replace with ring-buffer
    std::vector<llama_token>      prev;
    std::vector<llama_token_data> cur;
-   size_t n_considered;
+   size_t n_valid; // Number of correct top tokens with correct probabilities.

    std::mt19937 rng;
};
@@ -116,6 +116,11 @@ std::string llama_sampling_print(const llama_sampling_params & params);
// Print sampling order into a string
std::string llama_sampling_order_print(const llama_sampling_params & params);

+std::string llama_sampling_type_to_str(llama_sampler_type sampler_type);
+
+std::vector<llama_sampler_type> llama_sampling_types_from_names(const std::vector<std::string> & names, bool allow_alt_names);
+std::vector<llama_sampler_type> llama_sampling_types_from_chars(const std::string & names_string);
+
// this is a common sampling function used across the examples for convenience
// it can serve as a starting point for implementing your own sampling function
// Note: When using multiple sequences, it is the caller's responsibility to call
@@ -1052,7 +1052,7 @@ struct train_params_common get_default_train_params_common() {

    params.custom_n_ctx           = false;

-   params.use_flash              = true;
+   params.use_flash              = false;
    params.use_checkpointing      = true;

    params.sample_start           = "";
@@ -1380,7 +1380,7 @@ bool consume_common_train_arg(

void finish_processing_train_args(struct train_params_common * params) {
    if (params->escape) {
-       process_escapes(params->sample_start);
+       string_process_escapes(params->sample_start);
    }
}
@@ -20,11 +20,13 @@
# - Update llama.cpp with the new pre-tokenizer if necessary
#
# TODO: generate tokenizer tests for llama.cpp
-# TODO: automate the update of convert-hf-to-gguf.py
#

import logging
import os
+import pathlib
+import re

import requests
import sys
import json
@@ -35,6 +37,7 @@ from transformers import AutoTokenizer

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("convert-hf-to-gguf-update")
+sess = requests.Session()


class TOKENIZER_TYPE(IntEnum):
@@ -49,6 +52,10 @@ chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶

if len(sys.argv) == 2:
    token = sys.argv[1]
+    if not token.startswith("hf_"):
+        logger.info("Huggingface token seems invalid")
+        logger.info("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
+        sys.exit(1)
else:
    logger.info("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
    sys.exit(1)
@@ -65,70 +72,56 @@ models = [
    {"name": "mpt",            "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
    {"name": "starcoder",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
    {"name": "gpt-2",          "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
+    {"name": "stablelm2",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b", },
    {"name": "refact",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
    {"name": "command-r",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
    {"name": "qwen2",          "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },
    {"name": "olmo",           "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
    {"name": "dbrx",           "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
+    {"name": "jina-v2-en",     "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM!
+    {"name": "jina-v2-es",     "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
+    {"name": "jina-v2-de",     "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
+    {"name": "smaug-bpe",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
]

-# make directory "models/tokenizers" if it doesn't exist
-if not os.path.exists("models/tokenizers"):
-    os.makedirs("models/tokenizers")
-

def download_file_with_auth(url, token, save_path):
    headers = {"Authorization": f"Bearer {token}"}
-   response = requests.get(url, headers=headers)
-   if response.status_code == 200:
-       with open(save_path, 'wb') as f:
-           f.write(response.content)
-       logger.info(f"File {save_path} downloaded successfully")
-   else:
-       logger.info(f"Failed to download file. Status code: {response.status_code}")
+   response = sess.get(url, headers=headers)
+   response.raise_for_status()
+   os.makedirs(os.path.dirname(save_path), exist_ok=True)
+   with open(save_path, 'wb') as f:
+       f.write(response.content)
+   logger.info(f"File {save_path} downloaded successfully")


-# download the tokenizer models
-for model in models:
+def download_model(model):
    name = model["name"]
    repo = model["repo"]
    tokt = model["tokt"]

-   if not os.path.exists(f"models/tokenizers/{name}"):
-       os.makedirs(f"models/tokenizers/{name}")
-   else:
-       logger.info(f"Directory models/tokenizers/{name} already exists - skipping")
-       continue
-
-   logger.info(f"Downloading {name} to models/tokenizers/{name}")
-
-   url = f"{repo}/raw/main/config.json"
-   save_path = f"models/tokenizers/{name}/config.json"
-   download_file_with_auth(url, token, save_path)
-
-   url = f"{repo}/raw/main/tokenizer.json"
-   save_path = f"models/tokenizers/{name}/tokenizer.json"
-   download_file_with_auth(url, token, save_path)
-
-   # if downloaded file is less than 1KB, we likely need to download an LFS instead
-   if os.path.getsize(save_path) < 1024:
-       # remove the file
-       os.remove(save_path)
-       url = f"{repo}/resolve/main/tokenizer.json"
-       save_path = f"models/tokenizers/{name}/tokenizer.json"
-       download_file_with_auth(url, token, save_path)
+   os.makedirs(f"models/tokenizers/{name}", exist_ok=True)

+   files = ["config.json", "tokenizer.json", "tokenizer_config.json"]
    if tokt == TOKENIZER_TYPE.SPM:
-       url = f"{repo}/resolve/main/tokenizer.model"
-       save_path = f"models/tokenizers/{name}/tokenizer.model"
-       download_file_with_auth(url, token, save_path)
+       files.append("tokenizer.model")

+   for file in files:
+       save_path = f"models/tokenizers/{name}/{file}"
+       if os.path.isfile(save_path):
+           logger.info(f"{name}: File {save_path} already exists - skipping")
+           continue
+       download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path)
+
+
+for model in models:
+   try:
+       download_model(model)
+   except Exception as e:
+       logger.error(f"Failed to download model {model['name']}. Error: {e}")

-   url = f"{repo}/raw/main/tokenizer_config.json"
-   save_path = f"models/tokenizers/{name}/tokenizer_config.json"
-   download_file_with_auth(url, token, save_path)

# generate the source code for the convert-hf-to-gguf.py:get_vocab_base_pre() function:
-# TODO: auto-update convert-hf-to-gguf.py with the generated function

src_ifs = ""
for model in models:
@@ -138,8 +131,17 @@ for model in models:
    if tokt == TOKENIZER_TYPE.SPM:
        continue

+   # Skip if the tokenizer folder does not exist or there are other download issues previously
+   if not os.path.exists(f"models/tokenizers/{name}"):
+       logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+       continue
+
    # create the tokenizer
-   tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+   try:
+       tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+   except OSError as e:
+       logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
+       continue  # Skip to the next model if the tokenizer can't be loaded

    chktok = tokenizer.encode(chktxt)
    chkhsh = sha256(str(chktok).encode()).hexdigest()
@@ -157,6 +159,8 @@ for model in models:
        logger.info("normalizer: " + json.dumps(normalizer, indent=4))
        pre_tokenizer = cfg["pre_tokenizer"]
        logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
+       if "ignore_merges" in cfg["model"]:
+           logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))

    logger.info("")

@@ -206,11 +210,18 @@ src_func = f"""
    return res
"""

-print(src_func) # noqa: NP100
+convert_py_pth = pathlib.Path("convert-hf-to-gguf.py")
+convert_py = convert_py_pth.read_text()
+convert_py = re.sub(
+    r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
+    lambda m: m.group(1) + src_func + m.group(3),
+    convert_py,
+    flags=re.DOTALL | re.MULTILINE,
+)

-logger.info("\n")
-logger.info("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!")
-logger.info("\n")
+convert_py_pth.write_text(convert_py)
+
+logger.info("+++ convert-hf-to-gguf.py was updated")

# generate tests for each tokenizer model

@@ -257,6 +268,7 @@ tests = [
    "3333333",
    "33333333",
    "333333333",
+   # "Cửa Việt", # llama-bpe fails on this
    chktxt,
]

@@ -277,8 +289,17 @@ for model in models:
    name = model["name"]
    tokt = model["tokt"]

+   # Skip if the tokenizer folder does not exist or there are other download issues previously
+   if not os.path.exists(f"models/tokenizers/{name}"):
+       logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+       continue
+
    # create the tokenizer
-   tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+   try:
+       tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+   except OSError as e:
+       logger.error(f"Failed to load tokenizer for model {name}. Error: {e}")
+       continue  # Skip this model and continue with the next one in the loop

    with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
        for text in tests:
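The update script above now rewrites convert-hf-to-gguf.py in place instead of asking the user to copy-paste the generated function. A minimal sketch of that marker-based replacement, using a hypothetical toy string and a placeholder regenerated body (only the marker comments and the re.sub call mirror the diff above; everything else here is illustrative):

```python
import re

# Hypothetical stand-in for convert-hf-to-gguf.py: only the two marker comments matter.
convert_py = (
    "class Model:\n"
    "    # Marker: Start get_vocab_base_pre\n"
    "    def get_vocab_base_pre(self, tokenizer):\n"
    "        res = None  # old generated body\n"
    "        return res\n"
    "    # Marker: End get_vocab_base_pre\n"
)

# Placeholder for the regenerated function body produced by the update script.
src_func = "\n    def get_vocab_base_pre(self, tokenizer):\n        res = \"new\"\n        return res\n    "

# Replace everything between the markers while keeping the markers themselves.
updated = re.sub(
    r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
    lambda m: m.group(1) + src_func + m.group(3),
    convert_py,
    flags=re.DOTALL | re.MULTILINE,
)

print(updated)
```

Passing a lambda as the replacement means the generated body is spliced in verbatim; backslashes and group references inside `src_func` are not reinterpreted by `re.sub`.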
File diff suppressed because it is too large
@@ -1,150 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import annotations
-
-import logging
-import json
-import os
-import struct
-import sys
-from pathlib import Path
-from typing import Any, BinaryIO, Sequence
-
-import numpy as np
-import torch
-
-if 'NO_LOCAL_GGUF' not in os.environ:
-    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
-import gguf
-
-logging.basicConfig(level=logging.DEBUG)
-logger = logging.getLogger("lora-to-gguf")
-
-NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}
-
-
-def write_file_header(fout: BinaryIO, params: dict[str, Any]) -> None:
-    fout.write(b"ggla"[::-1])  # magic (ggml lora)
-    fout.write(struct.pack("i", 1))  # file version
-    fout.write(struct.pack("i", params["r"]))
-    # https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int
-    # but some models ship a float value instead
-    # let's convert to int, but fail if lossless conversion is not possible
-    assert (
-        int(params["lora_alpha"]) == params["lora_alpha"]
-    ), "cannot convert float to int losslessly"
-    fout.write(struct.pack("i", int(params["lora_alpha"])))
-
-
-def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_type: np.dtype[Any]) -> None:
-    sname = name.encode("utf-8")
-    fout.write(
-        struct.pack(
-            "iii",
-            len(shape),
-            len(sname),
-            NUMPY_TYPE_TO_FTYPE[data_type.name],
-        )
-    )
-    fout.write(struct.pack("i" * len(shape), *shape[::-1]))
-    fout.write(sname)
-    fout.seek((fout.tell() + 31) & -32)
-
-
-if __name__ == '__main__':
-    if len(sys.argv) < 2:
-        logger.info(f"Usage: python {sys.argv[0]} <path> [arch]")
-        logger.info("Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'")
-        logger.info(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
-        sys.exit(1)
-
-    input_json = os.path.join(sys.argv[1], "adapter_config.json")
-    input_model = os.path.join(sys.argv[1], "adapter_model.bin")
-    output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
-
-    if os.path.exists(input_model):
-        model = torch.load(input_model, map_location="cpu")
-    else:
-        input_model = os.path.join(sys.argv[1], "adapter_model.safetensors")
-        # lazy import load_file only if lora is in safetensors format.
-        from safetensors.torch import load_file
-        model = load_file(input_model, device="cpu")
-
-    arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
-
-    if arch_name not in gguf.MODEL_ARCH_NAMES.values():
-        logger.error(f"Error: unsupported architecture {arch_name}")
-        sys.exit(1)
-
-    arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)]
-    name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone
-
-    with open(input_json, "r") as f:
-        params = json.load(f)
-
-    if params["peft_type"] != "LORA":
-        logger.error(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
-        sys.exit(1)
-
-    if params["fan_in_fan_out"] is True:
-        logger.error("Error: param fan_in_fan_out is not supported")
-        sys.exit(1)
-
-    if params["bias"] is not None and params["bias"] != "none":
-        logger.error("Error: param bias is not supported")
-        sys.exit(1)
-
-    # TODO: these seem to be layers that have been trained but without lora.
-    # doesn't seem widely used but eventually should be supported
-    if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
-        logger.error("Error: param modules_to_save is not supported")
-        sys.exit(1)
-
-    with open(output_path, "wb") as fout:
-        fout.truncate()
-
-        write_file_header(fout, params)
-        for k, v in model.items():
-            orig_k = k
-            if k.endswith(".default.weight"):
-                k = k.replace(".default.weight", ".weight")
-            if k in ["llama_proj.weight", "llama_proj.bias"]:
-                continue
-            if k.endswith("lora_A.weight"):
-                if v.dtype != torch.float16 and v.dtype != torch.float32:
-                    v = v.float()
-                v = v.T
-            else:
-                v = v.float()
-
-            t = v.detach().numpy()
-
-            prefix = "base_model.model."
-            if k.startswith(prefix):
-                k = k[len(prefix) :]
-
-            lora_suffixes = (".lora_A.weight", ".lora_B.weight")
-            if k.endswith(lora_suffixes):
-                suffix = k[-len(lora_suffixes[0]):]
-                k = k[: -len(lora_suffixes[0])]
-            else:
-                logger.error(f"Error: unrecognized tensor name {orig_k}")
-                sys.exit(1)
-
-            tname = name_map.get_name(k)
-            if tname is None:
-                logger.error(f"Error: could not map tensor name {orig_k}")
-                logger.error(" Note: the arch parameter must be specified if the model is not llama")
-                sys.exit(1)
-
-            if suffix == ".lora_A.weight":
-                tname += ".weight.loraA"
-            elif suffix == ".lora_B.weight":
-                tname += ".weight.loraB"
-            else:
-                assert False
-
-            logger.info(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
-            write_tensor_header(fout, tname, t.shape, t.dtype)
-            t.tofile(fout)
-
-    logger.info(f"Converted {input_json} and {input_model} to {output_path}")
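For reference, the binary layout that the removed LoRA converter produced can be read back with plain struct unpacking. This is a minimal sketch, not part of the repository; it assumes a file written by the write_file_header/write_tensor_header functions shown above (native-endian "i" fields as written by struct.pack, reversed shape, and 32-byte alignment before each tensor's data):

```python
import struct

def read_ggla_header(path: str) -> dict:
    """Parse the fixed-size header of a legacy 'ggla' LoRA file (sketch)."""
    with open(path, "rb") as f:
        magic = f.read(4)
        assert magic == b"ggla"[::-1], "not a ggml lora file"
        version, lora_r, lora_alpha = struct.unpack("iii", f.read(12))
        return {"version": version, "r": lora_r, "lora_alpha": lora_alpha}

def read_tensor_header(f):
    """Read one tensor header: dims, name length, ftype, shape, name, then align to 32 bytes."""
    n_dims, name_len, ftype = struct.unpack("iii", f.read(12))
    shape = struct.unpack("i" * n_dims, f.read(4 * n_dims))[::-1]  # stored reversed by the writer
    name = f.read(name_len).decode("utf-8")
    f.seek((f.tell() + 31) & -32)  # tensor data starts at the next 32-byte boundary
    return name, shape, ftype
```

The field order mirrors the writer above: magic, file version, LoRA rank `r`, integer `lora_alpha`, then per-tensor headers followed by the raw float data.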
@@ -1,143 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import annotations
-
-import logging
-import argparse
-import os
-import sys
-from pathlib import Path
-from pprint import pprint
-
-import torch
-from sentencepiece import SentencePieceProcessor
-
-if 'NO_LOCAL_GGUF' not in os.environ:
-    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
-import gguf
-
-logger = logging.getLogger("persimmon-to-gguf")
-
-
-def _flatten_dict(dct, tensors, prefix=None):
-    assert isinstance(dct, dict)
-    for key in dct.keys():
-        new_prefix = prefix + '.' + key if prefix is not None else key
-        if isinstance(dct[key], torch.Tensor):
-            tensors[new_prefix] = dct[key]
-        elif isinstance(dct[key], dict):
-            _flatten_dict(dct[key], tensors, new_prefix)
-        else:
-            raise ValueError(type(dct[key]))
-    return None
-
-
-def _get_sentencepiece_tokenizer_info(dir_model: Path):
-    tokenizer_path = dir_model / 'adept_vocab.model'
-    logger.info('getting sentencepiece tokenizer from', tokenizer_path)
-    tokenizer = SentencePieceProcessor(str(tokenizer_path))
-    logger.info('adding tokens')
-    tokens: list[bytes] = []
-    scores: list[float] = []
-    toktypes: list[int] = []
-
-    for i in range(tokenizer.vocab_size()):
-        text: bytes
-        score: float
-
-        piece = tokenizer.id_to_piece(i)
-        text = piece.encode("utf-8")
-        score = tokenizer.get_score(i)
-
-        toktype = 1
-        if tokenizer.is_unknown(i):
-            toktype = 2
-        if tokenizer.is_control(i):
-            toktype = 3
-        if tokenizer.is_unused(i):
-            toktype = 5
-        if tokenizer.is_byte(i):
-            toktype = 6
-
-        tokens.append(text)
-        scores.append(score)
-        toktypes.append(toktype)
-        pass
-    return tokens, scores, toktypes
-
-
-def main():
-    parser = argparse.ArgumentParser(description="Convert a Persimmon model from Adept (e.g. Persimmon 8b chat) to a GGML compatible file")
-    parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
-    parser.add_argument("--ckpt-path", type=Path, help="path to persimmon checkpoint .pt file")
-    parser.add_argument("--model-dir", type=Path, help="directory containing model e.g. 8b_chat_model_release")
-    parser.add_argument("--adept-inference-dir", type=str, help="path to adept-inference code directory")
-    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
-    args = parser.parse_args()
-    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
-    sys.path.append(str(args.adept_inference_dir))
-    persimmon_model = torch.load(args.ckpt_path)
-    hparams = persimmon_model['args']
-    pprint(hparams)
-    tensors: dict[str, torch.Tensor] = {}
-    _flatten_dict(persimmon_model['model'], tensors, None)
-
-    arch = gguf.MODEL_ARCH.PERSIMMON
-    gguf_writer = gguf.GGUFWriter(args.outfile, gguf.MODEL_ARCH_NAMES[arch])
-
-    block_count = hparams.num_layers
-    head_count = hparams.num_attention_heads
-    head_count_kv = head_count
-    ctx_length = hparams.seq_length
-    hidden_size = hparams.hidden_size
-
-    gguf_writer.add_name('persimmon-8b-chat')
-    gguf_writer.add_context_length(ctx_length)
-    gguf_writer.add_embedding_length(hidden_size)
-    gguf_writer.add_block_count(block_count)
-    gguf_writer.add_feed_forward_length(hparams.ffn_hidden_size)
-    # ref: https://github.com/ggerganov/llama.cpp/pull/4889/commits/eea19039fc52ea2dbd1aab45b59ab4e3e29a3443
-    gguf_writer.add_rope_dimension_count(hidden_size // head_count // 2)
-    gguf_writer.add_head_count(head_count)
-    gguf_writer.add_head_count_kv(head_count_kv)
-    gguf_writer.add_rope_freq_base(hparams.rotary_emb_base)
-    gguf_writer.add_layer_norm_eps(hparams.layernorm_epsilon)
-
-    tokens, scores, toktypes = _get_sentencepiece_tokenizer_info(args.model_dir)
-    gguf_writer.add_tokenizer_model('llama')
-    gguf_writer.add_tokenizer_pre('default')
-    gguf_writer.add_token_list(tokens)
-    gguf_writer.add_token_scores(scores)
-    gguf_writer.add_token_types(toktypes)
-    gguf_writer.add_bos_token_id(71013)
-    gguf_writer.add_eos_token_id(71013)
-
-    tensor_map = gguf.get_tensor_name_map(arch, block_count)
-    logger.info(tensor_map)
-    for name in tensors.keys():
-        data_torch = tensors[name]
-        if name.endswith(".self_attention.rotary_emb.inv_freq"):
-            continue
-        old_dtype = data_torch.dtype
-        # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
-        data = data_torch.to(torch.float32).squeeze().numpy()
-        new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
-        if new_name is None:
-            raise ValueError(f"Can not map tensor '{name}'")
-
-        n_dims = len(data.shape)
-        logger.debug(f"{new_name}, n_dims = {str(n_dims)}, {str(old_dtype)} --> {str(data.dtype)}")
-        gguf_writer.add_tensor(new_name, data)
-    logger.info("gguf: write header")
-    gguf_writer.write_header_to_file()
-    logger.info("gguf: write metadata")
-    gguf_writer.write_kv_data_to_file()
-    logger.info("gguf: write tensors")
-    gguf_writer.write_tensors_to_file()
-
-    gguf_writer.close()
-
-    logger.info(f"gguf: model successfully exported to '{args.outfile}'")
-
-
-if __name__ == '__main__':
-    main()
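The removed converter's _flatten_dict walks a nested checkpoint dictionary and keys every tensor by its dotted path. A small self-contained illustration of that behavior (the toy nested dictionary below is made up; the function follows the deleted _flatten_dict):

```python
import torch

def flatten_dict(dct, tensors, prefix=None):
    # Same logic as the removed _flatten_dict: recurse into dicts, collect tensors by dotted key.
    for key, value in dct.items():
        new_prefix = prefix + '.' + key if prefix is not None else key
        if isinstance(value, torch.Tensor):
            tensors[new_prefix] = value
        elif isinstance(value, dict):
            flatten_dict(value, tensors, new_prefix)
        else:
            raise ValueError(type(value))

nested = {"language_model": {"embedding": {"word_embeddings": {"weight": torch.zeros(4, 2)}}}}
flat: dict = {}
flatten_dict(nested, flat)
print(list(flat))  # ['language_model.embedding.word_embeddings.weight']
```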
@@ -17,7 +17,7 @@ Also, it is important to check that the examples and main ggml backends (CUDA, M
### 1. Convert the model to GGUF

This step is done in python with a `convert` script using the [gguf](https://pypi.org/project/gguf/) library.
-Depending on the model architecture, you can use either [convert.py](../convert.py) or [convert-hf-to-gguf.py](../convert-hf-to-gguf.py).
+Depending on the model architecture, you can use either [convert-hf-to-gguf.py](../convert-hf-to-gguf.py) or [examples/convert-legacy-llama.py](../examples/convert-legacy-llama.py) (for `llama/llama2` models in `.pth` format).

The convert script reads the model configuration, tokenizer, tensor names+data and converts them to GGUF metadata and tensors.
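As context for the paragraph updated above, this is roughly what such a convert script does with the gguf library. A minimal sketch using only the gguf-py calls that already appear in the converters elsewhere in this diff; the architecture name, hyperparameters, tensor name and output path are placeholders, not values from the repository:

```python
import numpy as np
import gguf

# Placeholder values; a real converter reads these from the model's config and checkpoint.
arch, n_ctx, n_layers = "llama", 2048, 2

writer = gguf.GGUFWriter("example.gguf", arch)
writer.add_context_length(n_ctx)   # hyperparameters become GGUF metadata (key/value pairs)
writer.add_block_count(n_layers)
writer.add_tensor("output_norm.weight", np.ones(16, dtype=np.float32))  # tensors are added by name

writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.write_tensors_to_file()
writer.close()
```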
docs/debugging-tests.md — new file, 104 lines
@@ -0,0 +1,104 @@
+# Debugging Tests Tips
+
+## How to run & execute or debug a specific test without anything else to keep the feedback loop short?
+
+There is a script called debug-test.sh in the scripts folder whose parameter takes a REGEX and an optional test number.
+
+For example, running the following command will output an interactive list from which you can select a test. It takes this form:
+
+`debug-test.sh [OPTION]... <test_regex> <test_number>`
+
+It will then build & run in the debugger for you.
+
+To just execute a test and get back a PASS or FAIL message run:
+
+```bash
+./scripts/debug-test.sh test-tokenizer
+```
+
+To test in GDB use the `-g` flag to enable gdb test mode.
+
+```bash
+./scripts/debug-test.sh -g test-tokenizer
+
+# Once in the debugger, i.e. at the chevrons prompt, setting a breakpoint could be as follows:
+>>> b main
+```
+
+To speed up the testing loop, if you know your test number you can just run it similar to below:
+
+```bash
+./scripts/debug-test.sh test 23
+```
+
+For further reference use `debug-test.sh -h` to print help.
+
+
+### How does the script work?
+If you want to be able to use the concepts contained in the script separately, the important ones are briefly outlined below.
+
+#### Step 1: Reset and Setup folder context
+
+From base of this repository, let's create `build-ci-debug` as our build context.
+
+```bash
+rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug
+```
+
+#### Step 2: Setup Build Environment and Compile Test Binaries
+
+Setup and trigger a build under debug mode. You may adapt the arguments as needed, but in this case these are sane defaults.
+
+```bash
+cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON ..
+make -j
+```
+
+#### Step 3: Find all tests available that matches REGEX
+
+The output of this command will give you the command & arguments needed to run GDB.
+
+* `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex)
+* `-N` : "show-only" disables test execution & shows test commands that you can feed to GDB.
+* `-V` : Verbose Mode
+
+```bash
+ctest -R "test-tokenizer" -V -N
+```
+
+This may return output similar to below (focusing on key lines to pay attention to):
+
+```bash
+...
+1: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
+1: Working Directory: .
+Labels: main
+  Test #1: test-tokenizer-0-llama-spm
+...
+4: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-falcon.gguf"
+4: Working Directory: .
+Labels: main
+  Test #4: test-tokenizer-0-falcon
+...
+```
+
+#### Step 4: Identify Test Command for Debugging
+
+So for test #1 above we can tell these two pieces of relevant information:
+* Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0`
+* Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf`
+
+#### Step 5: Run GDB on test command
+
+Based on the ctest 'test command' report above we can then run a gdb session via this command below:
+
+```bash
+gdb --args ${Test Binary} ${Test GGUF Model}
+```
+
+Example:
+
+```bash
+gdb --args ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
+```
@@ -49,4 +49,7 @@ else()
        add_subdirectory(server)
    endif()
    add_subdirectory(export-lora)
+   if (LLAMA_RPC)
+       add_subdirectory(rpc)
+   endif()
endif()
@@ -48,7 +48,7 @@ int main(int argc, char ** argv) {
        params.prompt = "Hello my name is";
    }

-   process_escapes(params.prompt);
+   string_process_escapes(params.prompt);

    // init LLM

@@ -24,14 +24,16 @@ from abc import ABC, abstractmethod
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, ClassVar, IO, Iterable, Literal, Protocol, TypeVar, runtime_checkable
+from typing import TYPE_CHECKING, Any, Callable, IO, Iterable, Literal, TypeVar, Optional

import numpy as np
-from sentencepiece import SentencePieceProcessor

if 'NO_LOCAL_GGUF' not in os.environ:
-    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
+    # use .parent.parent since we are in "examples" directory
+    sys.path.insert(1, str(Path(__file__).parent.parent / 'gguf-py'))

import gguf
+from gguf import BaseVocab, Vocab, NoVocab, BpeVocab, SentencePieceVocab, LlamaHfVocab

if TYPE_CHECKING:
    from typing_extensions import Self, TypeAlias
@@ -284,6 +286,7 @@ class Params:
        n_experts      = None
        n_experts_used = None
        f_rope_freq_base = None
+        n_ff = None

        # hack to determine LLaMA v1 vs v2 vs CodeLlama
        if config.get("moe"):
@@ -308,6 +311,8 @@ class Params:
            n_experts_used = config["moe"]["num_experts_per_tok"]
            f_rope_freq_base = 1e6

+        assert n_ff is not None
+
        return Params(
            n_vocab          = model["tok_embeddings.weight"].shape[0],
            n_embd           = config["dim"],
@ -341,302 +346,40 @@ class Params:
|
||||||
return params
|
return params
|
||||||
|
|
||||||
|
|
||||||
#
|
@dataclass
|
||||||
# vocab
|
class Metadata:
|
||||||
#
|
name: Optional[str] = None
|
||||||
|
author: Optional[str] = None
|
||||||
|
version: Optional[str] = None
|
||||||
|
url: Optional[str] = None
|
||||||
|
description: Optional[str] = None
|
||||||
|
licence: Optional[str] = None
|
||||||
|
source_url: Optional[str] = None
|
||||||
|
source_hf_repo: Optional[str] = None
|
||||||
|
|
||||||
@runtime_checkable
|
@staticmethod
|
||||||
class BaseVocab(Protocol):
|
def load(metadata_path: Path) -> Metadata:
|
||||||
tokenizer_model: ClassVar[str]
|
if metadata_path is None or not metadata_path.exists():
|
||||||
name: ClassVar[str]
|
return Metadata()
|
||||||
|
|
||||||
|
with open(metadata_path, 'r') as file:
|
||||||
|
data = json.load(file)
|
||||||
|
|
||||||
class NoVocab(BaseVocab):
|
# Create a new Metadata instance
|
||||||
tokenizer_model = "no_vocab"
|
metadata = Metadata()
|
||||||
name = "no_vocab"
|
|
||||||
|
|
||||||
def __repr__(self) -> str:
|
# Assigning values to Metadata attributes if they exist in the JSON file
|
||||||
return "<NoVocab for a model without integrated vocabulary>"
|
# This is based on LLM_KV_NAMES mapping in llama.cpp
|
||||||
|
metadata.name = data.get("general.name")
|
||||||
|
metadata.author = data.get("general.author")
|
||||||
|
metadata.version = data.get("general.version")
|
||||||
|
metadata.url = data.get("general.url")
|
||||||
|
metadata.description = data.get("general.description")
|
||||||
|
metadata.license = data.get("general.license")
|
||||||
|
metadata.source_url = data.get("general.source.url")
|
||||||
|
metadata.source_hf_repo = data.get("general.source.huggingface.repository")
|
||||||
|
|
||||||
|
return metadata
|
||||||
@runtime_checkable
|
|
||||||
class Vocab(BaseVocab, Protocol):
|
|
||||||
vocab_size: int
|
|
||||||
added_tokens_dict: dict[str, int]
|
|
||||||
added_tokens_list: list[str]
|
|
||||||
fname_tokenizer: Path
|
|
||||||
|
|
||||||
def __init__(self, base_path: Path): ...
|
|
||||||
def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: ...
|
|
||||||
|
|
||||||
|
|
||||||
class BpeVocab(Vocab):
|
|
||||||
tokenizer_model = "gpt2"
|
|
||||||
name = "bpe"
|
|
||||||
|
|
||||||
def __init__(self, base_path: Path):
|
|
||||||
added_tokens: dict[str, int] = {}
|
|
||||||
|
|
||||||
if (fname_tokenizer := base_path / 'vocab.json').exists():
|
|
||||||
# "slow" tokenizer
|
|
||||||
with open(fname_tokenizer, encoding="utf-8") as f:
|
|
||||||
self.vocab = json.load(f)
|
|
||||||
|
|
||||||
try:
|
|
||||||
# FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
|
|
||||||
with open(base_path / ADDED_TOKENS_FILE, encoding="utf-8") as f:
|
|
||||||
added_tokens = json.load(f)
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
# "fast" tokenizer
|
|
||||||
fname_tokenizer = base_path / FAST_TOKENIZER_FILE
|
|
||||||
|
|
||||||
# if this fails, FileNotFoundError propagates to caller
|
|
||||||
with open(fname_tokenizer, encoding="utf-8") as f:
|
|
||||||
tokenizer_json = json.load(f)
|
|
||||||
|
|
||||||
tokenizer_model: dict[str, Any] = tokenizer_json['model']
|
|
||||||
if (
|
|
||||||
tokenizer_model['type'] != 'BPE' or tokenizer_model.get('byte_fallback', False)
|
|
||||||
or tokenizer_json['decoder']['type'] != 'ByteLevel'
|
|
||||||
):
|
|
||||||
raise FileNotFoundError('Cannot find GPT-2 BPE tokenizer')
|
|
||||||
|
|
||||||
self.vocab = tokenizer_model["vocab"]
|
|
||||||
|
|
||||||
if (added := tokenizer_json.get('added_tokens')) is not None:
|
|
||||||
# Added tokens here can be duplicates of the main vocabulary.
|
|
||||||
added_tokens = {item['content']: item['id']
|
|
||||||
for item in added
|
|
||||||
if item['content'] not in self.vocab}
|
|
||||||
|
|
||||||
vocab_size = len(self.vocab)
|
|
||||||
expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
|
|
||||||
actual_ids = sorted(added_tokens.values())
|
|
||||||
if expected_ids != actual_ids:
|
|
||||||
expected_end_id = vocab_size + len(actual_ids) - 1
|
|
||||||
raise ValueError(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
|
|
||||||
f"{vocab_size} - {expected_end_id}; got {actual_ids}")
|
|
||||||
|
|
||||||
items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
|
|
||||||
self.added_tokens_dict = added_tokens
|
|
||||||
self.added_tokens_list = [text for (text, idx) in items]
|
|
||||||
self.vocab_size_base = vocab_size
|
|
||||||
self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
|
|
||||||
self.fname_tokenizer = fname_tokenizer
|
|
||||||
|
|
||||||
def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
|
|
||||||
reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}
|
|
||||||
|
|
||||||
for i, _ in enumerate(self.vocab):
|
|
||||||
yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL
|
|
||||||
|
|
||||||
def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
|
|
||||||
for text in self.added_tokens_list:
|
|
||||||
score = -1000.0
|
|
||||||
yield text.encode("utf-8"), score, gguf.TokenType.CONTROL
|
|
||||||
|
|
||||||
def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
|
|
||||||
yield from self.bpe_tokens()
|
|
||||||
yield from self.added_tokens()
|
|
||||||
|
|
||||||
def __repr__(self) -> str:
|
|
||||||
return f"<BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
|
|
||||||
|
|
||||||
|
|
||||||
class SentencePieceVocab(Vocab):
|
|
||||||
tokenizer_model = "llama"
|
|
||||||
name = "spm"
|
|
||||||
|
|
||||||
def __init__(self, base_path: Path):
|
|
||||||
added_tokens: dict[str, int] = {}
|
|
||||||
if (fname_tokenizer := base_path / 'tokenizer.model').exists():
|
|
||||||
# normal location
|
|
||||||
try:
|
|
||||||
with open(base_path / ADDED_TOKENS_FILE, encoding="utf-8") as f:
|
|
||||||
added_tokens = json.load(f)
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass
|
|
||||||
elif not (fname_tokenizer := base_path.parent / 'tokenizer.model').exists():
|
|
||||||
# not found in alternate location either
|
|
||||||
raise FileNotFoundError('Cannot find tokenizer.model')
|
|
||||||
|
|
||||||
self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
|
|
||||||
vocab_size = self.sentencepiece_tokenizer.vocab_size()
|
|
||||||
|
|
||||||
new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
|
|
||||||
expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens)))
|
|
||||||
actual_new_ids = sorted(new_tokens.keys())
|
|
||||||
|
|
||||||
if expected_new_ids != actual_new_ids:
|
|
||||||
raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}")
|
|
||||||
|
|
||||||
# Token pieces that were added to the base vocabulary.
|
|
||||||
self.added_tokens_dict = added_tokens
|
|
||||||
self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
|
|
||||||
self.vocab_size_base = vocab_size
|
|
||||||
self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
|
|
||||||
self.fname_tokenizer = fname_tokenizer
|
|
||||||
|
|
||||||
def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
|
|
||||||
tokenizer = self.sentencepiece_tokenizer
|
|
||||||
for i in range(tokenizer.vocab_size()):
|
|
||||||
            piece = tokenizer.id_to_piece(i)
            text = piece.encode("utf-8")
            score: float = tokenizer.get_score(i)

            toktype = gguf.TokenType.NORMAL
            if tokenizer.is_unknown(i):
                toktype = gguf.TokenType.UNKNOWN
            if tokenizer.is_control(i):
                toktype = gguf.TokenType.CONTROL

            # NOTE: I think added_tokens are user defined.
            # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
            # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED

            if tokenizer.is_unused(i):
                toktype = gguf.TokenType.UNUSED
            if tokenizer.is_byte(i):
                toktype = gguf.TokenType.BYTE

            yield text, score, toktype

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.sentencepiece_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


class LlamaHfVocab(Vocab):
    tokenizer_model = "llama"
    name = "hfft"

    def __init__(self, base_path: Path):
        fname_tokenizer = base_path / FAST_TOKENIZER_FILE
        # if this fails, FileNotFoundError propagates to caller
        with open(fname_tokenizer, encoding='utf-8') as f:
            tokenizer_json = json.load(f)

        # pre-check so we know if we need transformers
        tokenizer_model: dict[str, Any] = tokenizer_json['model']
        is_llama3 = (
            tokenizer_model['type'] == 'BPE' and tokenizer_model.get('ignore_merges', False)
            and not tokenizer_model.get('byte_fallback', True)
        )
        if is_llama3:
            raise TypeError('Llama 3 must be converted with BpeVocab')

        if not is_llama3 and (
            tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
            or tokenizer_json['decoder']['type'] != 'Sequence'
        ):
            raise FileNotFoundError('Cannot find Llama BPE tokenizer')

        try:
            from transformers import AutoTokenizer
        except ImportError as e:
            raise ImportError(
                "To use LlamaHfVocab, please install the `transformers` package. "
                "You can install it with `pip install transformers`."
            ) from e

        # Allow the tokenizer to default to slow or fast versions.
        # Explicitly set tokenizer to use local paths.
        self.tokenizer = AutoTokenizer.from_pretrained(
            base_path,
            cache_dir=base_path,
            local_files_only=True,
        )
        assert self.tokenizer.is_fast  # assume tokenizer.json is used

        # Initialize lists and dictionaries for added tokens
        self.added_tokens_list = []
        self.added_tokens_dict = dict()
        self.added_tokens_ids = set()

        # Process added tokens
        for tok, tokidx in sorted(
            self.tokenizer.get_added_vocab().items(), key=lambda x: x[1]
        ):
            # Only consider added tokens that are not in the base vocabulary
            if tokidx >= self.tokenizer.vocab_size:
                self.added_tokens_list.append(tok)
                self.added_tokens_dict[tok] = tokidx
                self.added_tokens_ids.add(tokidx)

        # Store special tokens and their IDs
        self.specials = {
            tok: self.tokenizer.get_vocab()[tok]
            for tok in self.tokenizer.all_special_tokens
        }
        self.special_ids = set(self.tokenizer.all_special_ids)

        # Set vocabulary sizes
        self.vocab_size_base = self.tokenizer.vocab_size
        self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)

        self.fname_tokenizer = fname_tokenizer

    def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        reverse_vocab = {
            id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items()
        }

        for token_id in range(self.vocab_size_base):
            # Skip processing added tokens here
            if token_id in self.added_tokens_ids:
                continue

            # Convert token text to bytes
            token_text = reverse_vocab[token_id].encode("utf-8")

            # Yield token text, score, and type
            yield token_text, self.get_token_score(token_id), self.get_token_type(
                token_id, token_text, self.special_ids  # Reuse already stored special IDs
            )

    def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType:
        # Special case for byte tokens
        if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
            return gguf.TokenType.BYTE

        # Determine token type based on whether it's a special token
        return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL

    def get_token_score(self, token_id: int) -> float:
        # Placeholder for actual logic to determine the token's score
        # This needs to be implemented based on specific requirements
        return -1000.0  # Default score

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            if text in self.specials:
                toktype = self.get_token_type(self.specials[text], b'', self.special_ids)
                score = self.get_token_score(self.specials[text])
            else:
                toktype = gguf.TokenType.USER_DEFINED
                score = -1000.0

            yield text.encode("utf-8"), score, toktype

    def has_newline_token(self):
        return "<0x0A>" in self.tokenizer.vocab or "\n" in self.tokenizer.vocab

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.hf_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<LlamaHfVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


#
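Both vocab classes expose the same iteration interface: `all_tokens()` yields `(bytes, float, gguf.TokenType)` triples that a GGUF writer can consume directly. A minimal consumption sketch follows; it assumes `vocab` is an instance of one of the classes above, and the helper name is illustrative rather than part of the script:

```python
# Minimal sketch: drain a Vocab-style iterator into parallel lists for a GGUF writer.
# `vocab` is assumed to be a SentencePieceVocab/LlamaHfVocab-like object as defined above.
def collect_tokens(vocab):
    texts:    list[bytes] = []
    scores:   list[float] = []
    toktypes: list[int]   = []
    for text, score, toktype in vocab.all_tokens():
        texts.append(text)
        scores.append(score)
        toktypes.append(int(toktype))
    # base tokens plus added tokens should account for the full vocabulary
    assert len(texts) == vocab.vocab_size
    return texts, scores, toktypes
```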
@@ -906,7 +649,7 @@ class LazyUnpickler(pickle.Unpickler):
     def rebuild_from_type_v2(func, new_type, args, state):
         return func(*args)

-    CLASSES = {
+    CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = {
         # getattr used here as a workaround for mypy not being smart enough to determine
         # the staticmethods have a __func__ attribute.
         ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),

@@ -1062,21 +805,42 @@ class OutputFile:
     def __init__(self, fname_out: Path, endianess:gguf.GGUFEndian = gguf.GGUFEndian.LITTLE):
         self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess)

-    def add_meta_arch(self, params: Params) -> None:
+    def add_meta_model(self, params: Params, metadata: Metadata) -> None:
+        # Metadata About The Model And Its Provenence
         name = "LLaMA"
+        if metadata is not None and metadata.name is not None:
+            name = metadata.name
-        # TODO: better logic to determine model name
-        if params.n_ctx == 4096:
-            name = "LLaMA v2"
         elif params.path_model is not None:
-            name = str(params.path_model.parent).split('/')[-1]
+            name = params.path_model.name
+        elif params.n_ctx == 4096:
+            # Heuristic detection of LLaMA v2 model
+            name = "LLaMA v2"

-        self.gguf.add_name (name)
-        self.gguf.add_vocab_size (params.n_vocab)
-        self.gguf.add_context_length (params.n_ctx)
-        self.gguf.add_embedding_length (params.n_embd)
-        self.gguf.add_block_count (params.n_layer)
-        self.gguf.add_feed_forward_length (params.n_ff)
+        self.gguf.add_name(name)
+
+        if metadata is not None:
+            if metadata.author is not None:
+                self.gguf.add_author(metadata.author)
+            if metadata.version is not None:
+                self.gguf.add_version(metadata.version)
+            if metadata.url is not None:
+                self.gguf.add_url(metadata.url)
+            if metadata.description is not None:
+                self.gguf.add_description(metadata.description)
+            if metadata.licence is not None:
+                self.gguf.add_licence(metadata.licence)
+            if metadata.source_url is not None:
+                self.gguf.add_source_url(metadata.source_url)
+            if metadata.source_hf_repo is not None:
+                self.gguf.add_source_hf_repo(metadata.source_hf_repo)
+
+    def add_meta_arch(self, params: Params) -> None:
+        # Metadata About The Neural Architecture Itself
+        self.gguf.add_vocab_size(params.n_vocab)
+        self.gguf.add_context_length(params.n_ctx)
+        self.gguf.add_embedding_length(params.n_embd)
+        self.gguf.add_block_count(params.n_layer)
+        self.gguf.add_feed_forward_length(params.n_ff)
         self.gguf.add_rope_dimension_count(params.n_embd // params.n_head)
         self.gguf.add_head_count (params.n_head)
         self.gguf.add_head_count_kv (params.n_head_kv)

@@ -1179,13 +943,14 @@ class OutputFile:
     @staticmethod
     def write_vocab_only(
         fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab,
-        endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False,
+        endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, metadata: Metadata = None,
     ) -> None:
         check_vocab_size(params, vocab, pad_vocab=pad_vocab)

         of = OutputFile(fname_out, endianess=endianess)

         # meta data
+        of.add_meta_model(params, metadata)
         of.add_meta_arch(params)
         of.add_meta_vocab(vocab)
         of.add_meta_special_vocab(svocab)

@@ -1212,12 +977,14 @@ class OutputFile:
         fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: BaseVocab, svocab: gguf.SpecialVocab,
         concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE,
         pad_vocab: bool = False,
+        metadata: Metadata = None,
     ) -> None:
         check_vocab_size(params, vocab, pad_vocab=pad_vocab)

         of = OutputFile(fname_out, endianess=endianess)

         # meta data
+        of.add_meta_model(params, metadata)
         of.add_meta_arch(params)
         if isinstance(vocab, Vocab):
             of.add_meta_vocab(vocab)
@@ -1253,6 +1020,37 @@ def pick_output_type(model: LazyModel, output_type_str: str | None) -> GGMLFileT
         raise ValueError(f"Unexpected combination of types: {name_to_type}")


+def model_parameter_count(model: LazyModel) -> int:
+    total_model_parameters = 0
+    for i, (name, lazy_tensor) in enumerate(model.items()):
+        sum_weights_in_tensor = 1
+        for dim in lazy_tensor.shape:
+            sum_weights_in_tensor *= dim
+        total_model_parameters += sum_weights_in_tensor
+    return total_model_parameters
+
+
+def model_parameter_count_rounded_notation(model_params_count: int) -> str:
+    if model_params_count > 1e12 :
+        # Trillions Of Parameters
+        scaled_model_params = model_params_count * 1e-12
+        scale_suffix = "T"
+    elif model_params_count > 1e9 :
+        # Billions Of Parameters
+        scaled_model_params = model_params_count * 1e-9
+        scale_suffix = "B"
+    elif model_params_count > 1e6 :
+        # Millions Of Parameters
+        scaled_model_params = model_params_count * 1e-6
+        scale_suffix = "M"
+    else:
+        # Thousands Of Parameters
+        scaled_model_params = model_params_count * 1e-3
+        scale_suffix = "K"
+
+    return f"{round(scaled_model_params)}{scale_suffix}"
+
+
 def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
     return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
             for (name, tensor) in model.items()}
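The two helpers above drive both the "model parameters count" log line in main() and the default output file name. A quick illustration of the rounded notation, assuming the functions are available in scope exactly as defined above (the parameter counts are arbitrary examples):

```python
# Illustrative checks of the rounding helper defined in the hunk above.
assert model_parameter_count_rounded_notation(7_241_000_000)     == "7B"    # billions
assert model_parameter_count_rounded_notation(125_000_000)       == "125M"  # millions
assert model_parameter_count_rounded_notation(1_300_000_000_000) == "1T"    # trillions
```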
@@ -1432,13 +1230,35 @@ class VocabFactory:
         return vocab, special_vocab


-def default_outfile(model_paths: list[Path], file_type: GGMLFileType) -> Path:
-    namestr = {
-        GGMLFileType.AllF32: "f32",
-        GGMLFileType.MostlyF16: "f16",
-        GGMLFileType.MostlyQ8_0:"q8_0",
+def default_convention_outfile(file_type: GGMLFileType, params: Params, model_params_count: int, metadata: Metadata) -> str:
+    quantization = {
+        GGMLFileType.AllF32: "F32",
+        GGMLFileType.MostlyF16: "F16",
+        GGMLFileType.MostlyQ8_0: "Q8_0",
     }[file_type]
-    ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf"
+
+    parameters = model_parameter_count_rounded_notation(model_params_count)
+
+    expert_count = ""
+    if params.n_experts is not None:
+        expert_count = f"{params.n_experts}x"
+
+    version = ""
+    if metadata is not None and metadata.version is not None:
+        version = f"-{metadata.version}"
+
+    name = "ggml-model"
+    if metadata is not None and metadata.name is not None:
+        name = metadata.name
+    elif params.path_model is not None:
+        name = params.path_model.name
+
+    return f"{name}{version}-{expert_count}{parameters}-{quantization}"
+
+
+def default_outfile(model_paths: list[Path], file_type: GGMLFileType, params: Params, model_params_count: int, metadata: Metadata) -> Path:
+    default_filename = default_convention_outfile(file_type, params, model_params_count, metadata)
+    ret = model_paths[0].parent / f"{default_filename}.gguf"
     if ret in model_paths:
         logger.error(
             f"Error: Default output path ({ret}) would overwrite the input. "
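The new convention produces names of the form `<name><-version>-<experts x><params>-<quantization>.gguf` instead of the old `ggml-model-<ftype>.gguf`. The following standalone sketch only re-states that pattern for illustration; the model name, version, and parameter count are invented placeholders and the helper is not part of the script:

```python
# Standalone illustration of the naming convention implemented above (not imported from convert.py).
def illustrate_outfile_name(name: str, version: str, experts: int | None, params: int, quant: str) -> str:
    version_part = f"-{version}" if version else ""
    expert_part  = f"{experts}x" if experts else ""
    # model_parameter_count_rounded_notation() would reduce `params` to e.g. "7B";
    # plain integer division is used here only to keep the sketch self-contained
    return f"{name}{version_part}-{expert_part}{params // 10**9}B-{quant}"

print(illustrate_outfile_name("Mistral-7B-Instruct", "v0.2", None, 7_241_000_000, "F16"))
# -> Mistral-7B-Instruct-v0.2-7B-F16
```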
@@ -1476,17 +1296,30 @@ def main(args_in: list[str] | None = None) -> None:
     parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides")
     parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing")
     parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+    parser.add_argument("--metadata", type=Path, help="Specify the path for a metadata file")
+    parser.add_argument("--get-outfile", action="store_true", help="get calculated default outfile name")

     args = parser.parse_args(args_in)

     if args.verbose:
         logging.basicConfig(level=logging.DEBUG)
-    elif args.dump_single or args.dump:
+    elif args.dump_single or args.dump or args.get_outfile:
         # Avoid printing anything besides the dump output
         logging.basicConfig(level=logging.WARNING)
     else:
         logging.basicConfig(level=logging.INFO)

+    metadata = Metadata.load(args.metadata)
+
+    if args.get_outfile:
+        model_plus = load_some_model(args.model)
+        params = Params.load(model_plus)
+        model = convert_model_names(model_plus.model, params, args.skip_unknown)
+        model_params_count = model_parameter_count(model_plus.model)
+        ftype = pick_output_type(model, args.outtype)
+        print(f"{default_convention_outfile(ftype, params, model_params_count, metadata)}") # noqa: NP100
+        return
+
     if args.no_vocab and args.vocab_only:
         raise ValueError("--vocab-only does not make sense with --no-vocab")
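The `--metadata` option is handed to `Metadata.load()`, whose fields are then written by `add_meta_model()`. The exact on-disk format accepted by `Metadata.load()` is defined elsewhere in the script and is not shown in this diff; assuming it reads a flat JSON object keyed by the field names consumed above, a test file could be generated like this (every value below is an invented placeholder):

```python
# Writes a hypothetical metadata file matching the fields consumed by add_meta_model().
# Whether Metadata.load() accepts exactly this JSON layout is an assumption, not confirmed here.
import json

example_metadata = {
    "name":           "Example-7B",
    "author":         "Jane Doe",
    "version":        "v1.0",
    "url":            "https://example.com/example-7b",
    "description":    "Example model used to illustrate the --metadata flag",
    "licence":        "MIT",
    "source_url":     "https://example.com/example-7b/releases",
    "source_hf_repo": "example-org/example-7b",
}

with open("example-metadata.json", "w", encoding="utf-8") as f:
    json.dump(example_metadata, f, indent=2)
```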
@@ -1500,6 +1333,9 @@ def main(args_in: list[str] | None = None) -> None:
     else:
         model_plus = ModelPlus(model = {}, paths = [args.model / 'dummy'], format = 'none', vocab = None)

+    model_params_count = model_parameter_count(model_plus.model)
+    logger.info(f"model parameters count : {model_params_count} ({model_parameter_count_rounded_notation(model_params_count)})")
+
     if args.dump:
         do_dump_model(model_plus)
         return

@@ -1553,7 +1389,7 @@ def main(args_in: list[str] | None = None) -> None:
             f_norm_eps = 1e-5,
         )
         OutputFile.write_vocab_only(outfile, params, vocab, special_vocab,
-                                    endianess=endianess, pad_vocab=args.pad_vocab)
+                                    endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata)
         logger.info(f"Wrote {outfile}")
         return

@@ -1566,13 +1402,13 @@ def main(args_in: list[str] | None = None) -> None:
     model = convert_model_names(model, params, args.skip_unknown)
     ftype = pick_output_type(model, args.outtype)
     model = convert_to_output_type(model, ftype)
-    outfile = args.outfile or default_outfile(model_plus.paths, ftype)
+    outfile = args.outfile or default_outfile(model_plus.paths, ftype, params, model_params_count, metadata)

     params.ftype = ftype
     logger.info(f"Writing {outfile}, format {ftype}")

     OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab,
-                         concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab)
+                         concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata)
     logger.info(f"Wrote {outfile}")
@@ -2,7 +2,7 @@

 This example reads weights from project [llama2.c](https://github.com/karpathy/llama2.c) and saves them in ggml compatible format. The vocab that is available in `models/ggml-vocab.bin` is used by default.

-To convert the model first download the models from the [llma2.c](https://github.com/karpathy/llama2.c) repository:
+To convert the model first download the models from the [llama2.c](https://github.com/karpathy/llama2.c) repository:

 `$ make -j`

@@ -774,7 +774,7 @@ static struct train_params get_default_train_params() {

     params.samples_start_after_nl = false;
     params.use_adam = true;
-    params.use_flash = true;
+    params.use_flash = false;
     params.use_scratch = true;

     // only adam
@@ -49,6 +49,12 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
         }

         float * out = output + batch.seq_id[i][0] * n_embd;
+        //TODO: I would also add a parameter here to enable normalization or not.
+        /*fprintf(stdout, "unnormalized_embedding:");
+        for (int hh = 0; hh < n_embd; hh++) {
+            fprintf(stdout, "%9.6f ", embd[hh]);
+        }
+        fprintf(stdout, "\n");*/
         llama_embd_normalize(embd, out, n_embd);
     }
 }

@@ -74,7 +80,7 @@ int main(int argc, char ** argv) {

     std::mt19937 rng(params.seed);
     if (params.random_prompt) {
-        params.prompt = gpt_random_prompt(rng);
+        params.prompt = string_random_prompt(rng);
     }

     llama_backend_init();

@@ -101,7 +107,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         fprintf(stderr, "\n");
-        fprintf(stderr, "%s\n", get_system_info(params).c_str());
+        fprintf(stderr, "%s\n", gpt_params_get_system_info(params).c_str());
     }

     // split the prompt into lines

@@ -123,10 +129,12 @@ int main(int argc, char ** argv) {
         inputs.push_back(inp);
     }

-    // add SEP if not present
+    // check if the last token is SEP
+    // it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true'
     for (auto & inp : inputs) {
         if (inp.empty() || inp.back() != llama_token_sep(model)) {
-            inp.push_back(llama_token_sep(model));
+            fprintf(stderr, "%s: warning: last token in the prompt is not SEP\n", __func__);
+            fprintf(stderr, "%s: 'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF header\n", __func__);
         }
     }

@@ -203,6 +211,7 @@ int main(int argc, char ** argv) {

     // clean up
     llama_print_timings(ctx);
+    llama_batch_free(batch);
     llama_free(ctx);
     llama_free_model(model);
     llama_backend_free();
@@ -52,15 +52,15 @@ static void ggml_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne
                     size_t i = i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
                     float v;
                     if (type == GGML_TYPE_F16) {
-                        v = ggml_fp16_to_fp32(*(ggml_fp16_t *) data + i);
+                        v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[i]);
                     } else if (type == GGML_TYPE_F32) {
-                        v = *(float *) data + i;
+                        v = *(float *) &data[i];
                     } else if (type == GGML_TYPE_I32) {
-                        v = (float) *(int32_t *) data + i;
+                        v = (float) *(int32_t *) &data[i];
                     } else if (type == GGML_TYPE_I16) {
-                        v = (float) *(int16_t *) data + i;
+                        v = (float) *(int16_t *) &data[i];
                     } else if (type == GGML_TYPE_I8) {
-                        v = (float) *(int8_t *) data + i;
+                        v = (float) *(int8_t *) &data[i];
                     } else {
                         GGML_ASSERT(false);
                     }

@@ -152,7 +152,7 @@ int main(int argc, char ** argv) {

     std::mt19937 rng(params.seed);
     if (params.random_prompt) {
-        params.prompt = gpt_random_prompt(rng);
+        params.prompt = string_random_prompt(rng);
     }

     llama_backend_init();

@@ -176,7 +176,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         fprintf(stderr, "\n");
-        fprintf(stderr, "%s\n", get_system_info(params).c_str());
+        fprintf(stderr, "%s\n", gpt_params_get_system_info(params).c_str());
     }

     bool OK = run(ctx, params);
@@ -563,8 +563,8 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         // not capturing these, to silcence warnings
         const int rope_mode = 0;

-        return ggml_rope_custom(ctx,
-            t, KQ_pos, n_rot, rope_mode, n_ctx, 0,
+        return ggml_rope_ext(ctx,
+            t, KQ_pos, nullptr, n_rot, rope_mode, n_ctx, 0,
             rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
         );
     };

@@ -643,7 +643,8 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     struct ggml_tensor * t15 = ggml_permute (ctx, t12, 0, 3, 1, 2); set_name(t15, "t15"); assert_shape_4d(t15, N, n_embd_head, n_head_kv, n_batch);
     struct ggml_tensor * t16;
     if (enable_flash_attn) {
-        t16 = ggml_flash_attn(ctx, t13, t14, t15, true); set_name(t16, "t16"); assert_shape_4d(t16, n_embd_head, N, n_head, n_batch);
+        GGML_ASSERT(false && "TODO: ggml_flash_attn_ext() not yet supported");
+        //t16 = ggml_flash_attn(ctx, t13, t14, t15, true); set_name(t16, "t16"); assert_shape_4d(t16, n_embd_head, N, n_head, n_batch);
     } else {
         struct ggml_tensor * t16_0 = ggml_mul_mat (ctx, t14, t13); set_name(t16_0, "t16_0"); assert_shape_4d(t16_0, N, N, n_head, n_batch);
         struct ggml_tensor * t16_1 = ggml_scale_inplace (ctx, t16_0, kv_scale); set_name(t16_1, "t16_1"); assert_shape_4d(t16_1, N, N, n_head, n_batch);
@@ -598,7 +598,7 @@ int main(int argc, char ** argv) {

     std::mt19937 rng(params.seed);
     if (params.random_prompt) {
-        params.prompt = gpt_random_prompt(rng);
+        params.prompt = string_random_prompt(rng);
     }

     sparams.dataset = params.prompt_file;

@@ -667,7 +667,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         fprintf(stderr, "\n");
-        fprintf(stderr, "%s\n", get_system_info(params).c_str());
+        fprintf(stderr, "%s\n", gpt_params_get_system_info(params).c_str());
     }

     bool OK = compute_imatrix(ctx, params, compute_ppl, from_chunk);
@@ -50,9 +50,9 @@ static void write_logfile(
         return;
     }

-    const std::string timestamp = get_sortable_timestamp();
+    const std::string timestamp = string_get_sortable_timestamp();

-    const bool success = create_directory_with_parents(params.logdir);
+    const bool success = fs_create_directory_with_parents(params.logdir);
     if (!success) {
         fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
                 __func__, params.logdir.c_str());

@@ -70,7 +70,7 @@ static void write_logfile(
     fprintf(logfile, "binary: infill\n");
     char model_desc[128];
     llama_model_desc(model, model_desc, sizeof(model_desc));
-    dump_non_result_info_yaml(logfile, params, ctx, timestamp, input_tokens, model_desc);
+    yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc);

     fprintf(logfile, "\n");
     fprintf(logfile, "######################\n");

@@ -78,8 +78,8 @@ static void write_logfile(
     fprintf(logfile, "######################\n");
     fprintf(logfile, "\n");

-    dump_string_yaml_multiline(logfile, "output", output.c_str());
-    dump_vector_int_yaml(logfile, "output_tokens", output_tokens);
+    yaml_dump_string_multiline(logfile, "output", output.c_str());
+    yaml_dump_vector_int(logfile, "output_tokens", output_tokens);

     llama_dump_timing_info_yaml(logfile, ctx);
     fclose(logfile);

@@ -236,7 +236,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         LOG_TEE("\n");
-        LOG_TEE("%s\n", get_system_info(params).c_str());
+        LOG_TEE("%s\n", gpt_params_get_system_info(params).c_str());
     }
     const bool add_bos = llama_should_add_bos_token(model);
     GGML_ASSERT(llama_add_eos_token(model) != 1);
@@ -621,8 +621,8 @@ int main(int argc, char ** argv) {

         if (params.escape) {
             //process escape sequences, for the initial prompt this is done in common.cpp when we load the params, but for the interactive mode we need to do it here
-            process_escapes(params.input_prefix);
-            process_escapes(params.input_suffix);
+            string_process_escapes(params.input_prefix);
+            string_process_escapes(params.input_suffix);
         }
         suff_rm_leading_spc = params.escape;
         if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
@@ -26,16 +26,21 @@ options:
   -m, --model <filename>              (default: models/7B/ggml-model-q4_0.gguf)
   -p, --n-prompt <n>                  (default: 512)
   -n, --n-gen <n>                     (default: 128)
-  -b, --batch-size <n>                (default: 512)
-  -ctk <t>, --cache-type-k <t>        (default: f16)
-  -ctv <t>, --cache-type-v <t>        (default: f16)
-  -t, --threads <n>                   (default: 112)
+  -pg <pp,tg>                         (default: 512,128)
+  -b, --batch-size <n>                (default: 2048)
+  -ub, --ubatch-size <n>              (default: 512)
+  -ctk, --cache-type-k <t>            (default: f16)
+  -ctv, --cache-type-v <t>            (default: f16)
+  -t, --threads <n>                   (default: 16)
   -ngl, --n-gpu-layers <n>            (default: 99)
   -sm, --split-mode <none|layer|row>  (default: layer)
   -mg, --main-gpu <i>                 (default: 0)
   -nkvo, --no-kv-offload <0|1>        (default: 0)
+  -fa, --flash-attn <0|1>             (default: 0)
   -mmp, --mmap <0|1>                  (default: 1)
-  -ts, --tensor_split <ts0/ts1/..>    (default: 0)
+  --numa <distribute|isolate|numactl> (default: disabled)
+  -embd, --embeddings <0|1>           (default: 0)
+  -ts, --tensor-split <ts0/ts1/..>    (default: 0)
   -r, --repetitions <n>               (default: 5)
   -o, --output <csv|json|md|sql>      (default: md)
   -v, --verbose                       (default: 0)

@@ -43,10 +48,11 @@ options:
 Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.
 ```

-llama-bench can perform two types of tests:
+llama-bench can perform three types of tests:

 - Prompt processing (pp): processing a prompt in batches (`-p`)
 - Text generation (tg): generating a sequence of tokens (`-n`)
+- Prompt processing + text generation (pg): processing a prompt followed by generating a sequence of tokens (`-pg`)

 With the exception of `-r`, `-o` and `-v`, all options can be specified multiple times to run multiple tests. Each pp and tg test is run with all combinations of the specified options. To specify multiple values for an option, the values can be separated by commas (e.g. `-n 16,32`), or the option can be specified multiple times (e.g. `-n 16 -n 32`).
@ -161,16 +161,24 @@ static const char * split_mode_str(llama_split_mode mode) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static std::string pair_str(const std::pair<int, int> & p) {
|
||||||
|
static char buf[32];
|
||||||
|
snprintf(buf, sizeof(buf), "%d,%d", p.first, p.second);
|
||||||
|
return buf;
|
||||||
|
}
|
||||||
|
|
||||||
struct cmd_params {
|
struct cmd_params {
|
||||||
std::vector<std::string> model;
|
std::vector<std::string> model;
|
||||||
std::vector<int> n_prompt;
|
std::vector<int> n_prompt;
|
||||||
std::vector<int> n_gen;
|
std::vector<int> n_gen;
|
||||||
|
std::vector<std::pair<int, int>> n_pg;
|
||||||
std::vector<int> n_batch;
|
std::vector<int> n_batch;
|
||||||
std::vector<int> n_ubatch;
|
std::vector<int> n_ubatch;
|
||||||
std::vector<ggml_type> type_k;
|
std::vector<ggml_type> type_k;
|
||||||
std::vector<ggml_type> type_v;
|
std::vector<ggml_type> type_v;
|
||||||
std::vector<int> n_threads;
|
std::vector<int> n_threads;
|
||||||
std::vector<int> n_gpu_layers;
|
std::vector<int> n_gpu_layers;
|
||||||
|
std::vector<std::string> rpc_servers;
|
||||||
std::vector<llama_split_mode> split_mode;
|
std::vector<llama_split_mode> split_mode;
|
||||||
std::vector<int> main_gpu;
|
std::vector<int> main_gpu;
|
||||||
std::vector<bool> no_kv_offload;
|
std::vector<bool> no_kv_offload;
|
||||||
|
@ -188,12 +196,14 @@ static const cmd_params cmd_params_defaults = {
|
||||||
/* model */ {"models/7B/ggml-model-q4_0.gguf"},
|
/* model */ {"models/7B/ggml-model-q4_0.gguf"},
|
||||||
/* n_prompt */ {512},
|
/* n_prompt */ {512},
|
||||||
/* n_gen */ {128},
|
/* n_gen */ {128},
|
||||||
|
/* n_pg */ {},
|
||||||
/* n_batch */ {2048},
|
/* n_batch */ {2048},
|
||||||
/* n_ubatch */ {512},
|
/* n_ubatch */ {512},
|
||||||
/* type_k */ {GGML_TYPE_F16},
|
/* type_k */ {GGML_TYPE_F16},
|
||||||
/* type_v */ {GGML_TYPE_F16},
|
/* type_v */ {GGML_TYPE_F16},
|
||||||
/* n_threads */ {get_math_cpu_count()},
|
/* n_threads */ {cpu_get_num_math()},
|
||||||
/* n_gpu_layers */ {99},
|
/* n_gpu_layers */ {99},
|
||||||
|
/* rpc_servers */ {""},
|
||||||
/* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
|
/* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
|
||||||
/* main_gpu */ {0},
|
/* main_gpu */ {0},
|
||||||
/* no_kv_offload */ {false},
|
/* no_kv_offload */ {false},
|
||||||
|
@ -215,12 +225,14 @@ static void print_usage(int /* argc */, char ** argv) {
|
||||||
printf(" -m, --model <filename> (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
|
printf(" -m, --model <filename> (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
|
||||||
printf(" -p, --n-prompt <n> (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
|
printf(" -p, --n-prompt <n> (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
|
||||||
printf(" -n, --n-gen <n> (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
|
printf(" -n, --n-gen <n> (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
|
||||||
|
printf(" -pg <pp,tg> (default: %s)\n", join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str());
|
||||||
printf(" -b, --batch-size <n> (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
|
printf(" -b, --batch-size <n> (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
|
||||||
printf(" -ub N, --ubatch-size <n> (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
|
printf(" -ub, --ubatch-size <n> (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
|
||||||
printf(" -ctk <t>, --cache-type-k <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
|
printf(" -ctk, --cache-type-k <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
|
||||||
printf(" -ctv <t>, --cache-type-v <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
|
printf(" -ctv, --cache-type-v <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
|
||||||
printf(" -t, --threads <n> (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
|
printf(" -t, --threads <n> (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
|
||||||
printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
|
printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
|
||||||
|
printf(" -rpc, --rpc <rpc_servers> (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str());
|
||||||
printf(" -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
|
printf(" -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
|
||||||
printf(" -mg, --main-gpu <i> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
|
printf(" -mg, --main-gpu <i> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
|
||||||
printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
|
printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
|
||||||
|
@ -304,6 +316,17 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
||||||
}
|
}
|
||||||
auto p = split<int>(argv[i], split_delim);
|
auto p = split<int>(argv[i], split_delim);
|
||||||
params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
|
params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
|
||||||
|
} else if (arg == "-pg") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
auto p = split<std::string>(argv[i], ',');
|
||||||
|
if (p.size() != 2) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
params.n_pg.push_back({std::stoi(p[0]), std::stoi(p[1])});
|
||||||
} else if (arg == "-b" || arg == "--batch-size") {
|
} else if (arg == "-b" || arg == "--batch-size") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
|
@ -364,6 +387,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
||||||
}
|
}
|
||||||
auto p = split<int>(argv[i], split_delim);
|
auto p = split<int>(argv[i], split_delim);
|
||||||
params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
|
params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
|
||||||
|
} else if (arg == "-rpc" || arg == "--rpc") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
params.rpc_servers.push_back(argv[i]);
|
||||||
} else if (arg == "-sm" || arg == "--split-mode") {
|
} else if (arg == "-sm" || arg == "--split-mode") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
|
@ -493,11 +522,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
||||||
if (params.model.empty()) { params.model = cmd_params_defaults.model; }
|
if (params.model.empty()) { params.model = cmd_params_defaults.model; }
|
||||||
if (params.n_prompt.empty()) { params.n_prompt = cmd_params_defaults.n_prompt; }
|
if (params.n_prompt.empty()) { params.n_prompt = cmd_params_defaults.n_prompt; }
|
||||||
if (params.n_gen.empty()) { params.n_gen = cmd_params_defaults.n_gen; }
|
if (params.n_gen.empty()) { params.n_gen = cmd_params_defaults.n_gen; }
|
||||||
|
if (params.n_pg.empty()) { params.n_pg = cmd_params_defaults.n_pg; }
|
||||||
if (params.n_batch.empty()) { params.n_batch = cmd_params_defaults.n_batch; }
|
if (params.n_batch.empty()) { params.n_batch = cmd_params_defaults.n_batch; }
|
||||||
if (params.n_ubatch.empty()) { params.n_ubatch = cmd_params_defaults.n_ubatch; }
|
if (params.n_ubatch.empty()) { params.n_ubatch = cmd_params_defaults.n_ubatch; }
|
||||||
if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; }
|
if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; }
|
||||||
if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; }
|
if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; }
|
||||||
if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
|
if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
|
||||||
|
if (params.rpc_servers.empty()) { params.rpc_servers = cmd_params_defaults.rpc_servers; }
|
||||||
if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; }
|
if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; }
|
||||||
if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; }
|
if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; }
|
||||||
if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
|
if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
|
||||||
|
@ -520,6 +551,7 @@ struct cmd_params_instance {
|
||||||
ggml_type type_v;
|
ggml_type type_v;
|
||||||
int n_threads;
|
int n_threads;
|
||||||
int n_gpu_layers;
|
int n_gpu_layers;
|
||||||
|
std::string rpc_servers;
|
||||||
llama_split_mode split_mode;
|
llama_split_mode split_mode;
|
||||||
int main_gpu;
|
int main_gpu;
|
||||||
bool no_kv_offload;
|
bool no_kv_offload;
|
||||||
|
@ -532,6 +564,9 @@ struct cmd_params_instance {
|
||||||
llama_model_params mparams = llama_model_default_params();
|
llama_model_params mparams = llama_model_default_params();
|
||||||
|
|
||||||
mparams.n_gpu_layers = n_gpu_layers;
|
mparams.n_gpu_layers = n_gpu_layers;
|
||||||
|
if (!rpc_servers.empty()) {
|
||||||
|
mparams.rpc_servers = rpc_servers.c_str();
|
||||||
|
}
|
||||||
mparams.split_mode = split_mode;
|
mparams.split_mode = split_mode;
|
||||||
mparams.main_gpu = main_gpu;
|
mparams.main_gpu = main_gpu;
|
||||||
mparams.tensor_split = tensor_split.data();
|
mparams.tensor_split = tensor_split.data();
|
||||||
|
@ -543,6 +578,7 @@ struct cmd_params_instance {
|
||||||
bool equal_mparams(const cmd_params_instance & other) const {
|
bool equal_mparams(const cmd_params_instance & other) const {
|
||||||
return model == other.model &&
|
return model == other.model &&
|
||||||
n_gpu_layers == other.n_gpu_layers &&
|
n_gpu_layers == other.n_gpu_layers &&
|
||||||
|
rpc_servers == other.rpc_servers &&
|
||||||
split_mode == other.split_mode &&
|
split_mode == other.split_mode &&
|
||||||
main_gpu == other.main_gpu &&
|
main_gpu == other.main_gpu &&
|
||||||
use_mmap == other.use_mmap &&
|
use_mmap == other.use_mmap &&
|
||||||
|
@ -571,6 +607,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
||||||
// this ordering minimizes the number of times that each model needs to be reloaded
|
// this ordering minimizes the number of times that each model needs to be reloaded
|
||||||
for (const auto & m : params.model)
|
for (const auto & m : params.model)
|
||||||
for (const auto & nl : params.n_gpu_layers)
|
for (const auto & nl : params.n_gpu_layers)
|
||||||
|
for (const auto & rpc : params.rpc_servers)
|
||||||
for (const auto & sm : params.split_mode)
|
for (const auto & sm : params.split_mode)
|
||||||
for (const auto & mg : params.main_gpu)
|
for (const auto & mg : params.main_gpu)
|
||||||
for (const auto & ts : params.tensor_split)
|
for (const auto & ts : params.tensor_split)
|
||||||
|
@ -597,6 +634,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
||||||
/* .type_v = */ tv,
|
/* .type_v = */ tv,
|
||||||
/* .n_threads = */ nt,
|
/* .n_threads = */ nt,
|
||||||
/* .n_gpu_layers = */ nl,
|
/* .n_gpu_layers = */ nl,
|
||||||
|
/* .rpc_servers = */ rpc,
|
||||||
/* .split_mode = */ sm,
|
/* .split_mode = */ sm,
|
||||||
/* .main_gpu = */ mg,
|
/* .main_gpu = */ mg,
|
||||||
/* .no_kv_offload= */ nkvo,
|
/* .no_kv_offload= */ nkvo,
|
||||||
|
@ -622,6 +660,33 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
||||||
/* .type_v = */ tv,
|
/* .type_v = */ tv,
|
||||||
/* .n_threads = */ nt,
|
/* .n_threads = */ nt,
|
||||||
/* .n_gpu_layers = */ nl,
|
/* .n_gpu_layers = */ nl,
|
||||||
|
/* .rpc_servers = */ rpc,
|
||||||
|
/* .split_mode = */ sm,
|
||||||
|
/* .main_gpu = */ mg,
|
||||||
|
/* .no_kv_offload= */ nkvo,
|
||||||
|
/* .flash_attn = */ fa,
|
||||||
|
/* .tensor_split = */ ts,
|
||||||
|
/* .use_mmap = */ mmp,
|
||||||
|
/* .embeddings = */ embd,
|
||||||
|
};
|
||||||
|
instances.push_back(instance);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const auto & n_pg : params.n_pg) {
|
||||||
|
if (n_pg.first == 0 && n_pg.second == 0) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
cmd_params_instance instance = {
|
||||||
|
/* .model = */ m,
|
||||||
|
/* .n_prompt = */ n_pg.first,
|
||||||
|
/* .n_gen = */ n_pg.second,
|
||||||
|
/* .n_batch = */ nb,
|
||||||
|
/* .n_ubatch = */ nub,
|
||||||
|
/* .type_k = */ tk,
|
||||||
|
/* .type_v = */ tv,
|
||||||
|
/* .n_threads = */ nt,
|
||||||
|
/* .n_gpu_layers = */ nl,
|
||||||
|
/* .rpc_servers = */ rpc,
|
||||||
/* .split_mode = */ sm,
|
/* .split_mode = */ sm,
|
||||||
/* .main_gpu = */ mg,
|
/* .main_gpu = */ mg,
|
||||||
/* .no_kv_offload= */ nkvo,
|
/* .no_kv_offload= */ nkvo,
|
||||||
|
@ -646,6 +711,7 @@ struct test {
|
||||||
static const bool kompute;
|
static const bool kompute;
|
||||||
static const bool metal;
|
static const bool metal;
|
||||||
static const bool sycl;
|
static const bool sycl;
|
||||||
|
static const bool rpc;
|
||||||
static const bool gpu_blas;
|
static const bool gpu_blas;
|
||||||
static const bool blas;
|
static const bool blas;
|
||||||
static const std::string cpu_info;
|
static const std::string cpu_info;
|
||||||
|
@ -744,6 +810,9 @@ struct test {
|
||||||
if (sycl) {
|
if (sycl) {
|
||||||
return GGML_SYCL_NAME;
|
return GGML_SYCL_NAME;
|
||||||
}
|
}
|
||||||
|
if (rpc) {
|
||||||
|
return "RPC";
|
||||||
|
}
|
||||||
if (gpu_blas) {
|
if (gpu_blas) {
|
||||||
return "GPU BLAS";
|
return "GPU BLAS";
|
||||||
}
|
}
|
||||||
|
@ -757,7 +826,7 @@ struct test {
|
||||||
static const std::vector<std::string> & get_fields() {
|
static const std::vector<std::string> & get_fields() {
|
||||||
static const std::vector<std::string> fields = {
|
static const std::vector<std::string> fields = {
|
||||||
"build_commit", "build_number",
|
"build_commit", "build_number",
|
||||||
"cuda", "opencl", "vulkan", "kompute", "metal", "sycl", "gpu_blas", "blas",
|
"cuda", "opencl", "vulkan", "kompute", "metal", "sycl", "rpc", "gpu_blas", "blas",
|
||||||
"cpu_info", "gpu_info",
|
"cpu_info", "gpu_info",
|
||||||
"model_filename", "model_type", "model_size", "model_n_params",
|
"model_filename", "model_type", "model_size", "model_n_params",
|
||||||
"n_batch", "n_ubatch",
|
"n_batch", "n_ubatch",
|
||||||
|
@ -813,7 +882,7 @@ struct test {
|
||||||
std::vector<std::string> values = {
|
std::vector<std::string> values = {
|
||||||
build_commit, std::to_string(build_number),
|
build_commit, std::to_string(build_number),
|
||||||
std::to_string(cuda), std::to_string(opencl), std::to_string(vulkan), std::to_string(vulkan),
|
std::to_string(cuda), std::to_string(opencl), std::to_string(vulkan), std::to_string(vulkan),
|
||||||
std::to_string(metal), std::to_string(sycl), std::to_string(gpu_blas), std::to_string(blas),
|
std::to_string(metal), std::to_string(sycl), std::to_string(rpc), std::to_string(gpu_blas), std::to_string(blas),
|
||||||
cpu_info, gpu_info,
|
cpu_info, gpu_info,
|
||||||
model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
|
model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
|
||||||
std::to_string(n_batch), std::to_string(n_ubatch),
|
std::to_string(n_batch), std::to_string(n_ubatch),
|
||||||
|
@ -848,6 +917,7 @@ const bool test::metal = !!ggml_cpu_has_metal();
|
||||||
const bool test::gpu_blas = !!ggml_cpu_has_gpublas();
|
const bool test::gpu_blas = !!ggml_cpu_has_gpublas();
|
||||||
const bool test::blas = !!ggml_cpu_has_blas();
|
const bool test::blas = !!ggml_cpu_has_blas();
|
||||||
const bool test::sycl = !!ggml_cpu_has_sycl();
|
const bool test::sycl = !!ggml_cpu_has_sycl();
|
||||||
|
const bool test::rpc = !!ggml_cpu_has_rpc();
|
||||||
const std::string test::cpu_info = get_cpu_info();
|
const std::string test::cpu_info = get_cpu_info();
|
||||||
const std::string test::gpu_info = get_gpu_info();
|
const std::string test::gpu_info = get_gpu_info();
|
||||||
|
|
||||||
|
@ -965,6 +1035,9 @@ struct markdown_printer : public printer {
|
||||||
if (field == "n_gpu_layers") {
|
if (field == "n_gpu_layers") {
|
||||||
return 3;
|
return 3;
|
||||||
}
|
}
|
||||||
|
if (field == "test") {
|
||||||
|
return 13;
|
||||||
|
}
|
||||||
|
|
||||||
int width = std::max((int)field.length(), 10);
|
int width = std::max((int)field.length(), 10);
|
||||||
|
|
||||||
|
@ -1091,12 +1164,11 @@ struct markdown_printer : public printer {
|
||||||
value = test::get_backend();
|
value = test::get_backend();
|
||||||
} else if (field == "test") {
|
} else if (field == "test") {
|
||||||
if (t.n_prompt > 0 && t.n_gen == 0) {
|
if (t.n_prompt > 0 && t.n_gen == 0) {
|
||||||
snprintf(buf, sizeof(buf), "pp %d", t.n_prompt);
|
snprintf(buf, sizeof(buf), "pp%d", t.n_prompt);
|
||||||
} else if (t.n_gen > 0 && t.n_prompt == 0) {
|
} else if (t.n_gen > 0 && t.n_prompt == 0) {
|
||||||
snprintf(buf, sizeof(buf), "tg %d", t.n_gen);
|
snprintf(buf, sizeof(buf), "tg%d", t.n_gen);
|
||||||
} else {
|
} else {
|
||||||
assert(false);
|
snprintf(buf, sizeof(buf), "pp%d+tg%d", t.n_prompt, t.n_gen);
|
||||||
exit(1);
|
|
||||||
}
|
}
|
||||||
value = buf;
|
value = buf;
|
||||||
} else if (field == "t/s") {
|
} else if (field == "t/s") {
|
||||||
|
@ -1297,6 +1369,7 @@ int main(int argc, char ** argv) {
|
||||||
llama_kv_cache_clear(ctx);
|
llama_kv_cache_clear(ctx);
|
||||||
|
|
||||||
uint64_t t_start = get_time_ns();
|
uint64_t t_start = get_time_ns();
|
||||||
|
|
||||||
if (t.n_prompt > 0) {
|
if (t.n_prompt > 0) {
|
||||||
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,8 +7,6 @@ android {
|
||||||
namespace = "com.example.llama"
|
namespace = "com.example.llama"
|
||||||
compileSdk = 34
|
compileSdk = 34
|
||||||
|
|
||||||
ndkVersion = "26.1.10909125"
|
|
||||||
|
|
||||||
defaultConfig {
|
defaultConfig {
|
||||||
applicationId = "com.example.llama"
|
applicationId = "com.example.llama"
|
||||||
minSdk = 33
|
minSdk = 33
|
||||||
|
@ -20,17 +18,6 @@ android {
|
||||||
vectorDrawables {
|
vectorDrawables {
|
||||||
useSupportLibrary = true
|
useSupportLibrary = true
|
||||||
}
|
}
|
||||||
ndk {
|
|
||||||
// Add NDK properties if wanted, e.g.
|
|
||||||
// abiFilters += listOf("arm64-v8a")
|
|
||||||
}
|
|
||||||
externalNativeBuild {
|
|
||||||
cmake {
|
|
||||||
arguments += "-DCMAKE_BUILD_TYPE=Release"
|
|
||||||
cppFlags += listOf()
|
|
||||||
arguments += listOf()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
buildTypes {
|
buildTypes {
|
||||||
|
@ -55,17 +42,6 @@ android {
|
||||||
composeOptions {
|
composeOptions {
|
||||||
kotlinCompilerExtensionVersion = "1.5.1"
|
kotlinCompilerExtensionVersion = "1.5.1"
|
||||||
}
|
}
|
||||||
packaging {
|
|
||||||
resources {
|
|
||||||
excludes += "/META-INF/{AL2.0,LGPL2.1}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
externalNativeBuild {
|
|
||||||
cmake {
|
|
||||||
path = file("src/main/cpp/CMakeLists.txt")
|
|
||||||
version = "3.22.1"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dependencies {
|
dependencies {
|
||||||
|
@ -78,6 +54,7 @@ dependencies {
|
||||||
implementation("androidx.compose.ui:ui-graphics")
|
implementation("androidx.compose.ui:ui-graphics")
|
||||||
implementation("androidx.compose.ui:ui-tooling-preview")
|
implementation("androidx.compose.ui:ui-tooling-preview")
|
||||||
implementation("androidx.compose.material3:material3")
|
implementation("androidx.compose.material3:material3")
|
||||||
|
implementation(project(":llama"))
|
||||||
testImplementation("junit:junit:4.13.2")
|
testImplementation("junit:junit:4.13.2")
|
||||||
androidTestImplementation("androidx.test.ext:junit:1.1.5")
|
androidTestImplementation("androidx.test.ext:junit:1.1.5")
|
||||||
androidTestImplementation("androidx.test.espresso:espresso-core:3.5.1")
|
androidTestImplementation("androidx.test.espresso:espresso-core:3.5.1")
|
||||||
|
@@ -1,5 +1,6 @@
 package com.example.llama

+import android.llama.cpp.LLamaAndroid
 import android.util.Log
 import androidx.compose.runtime.getValue
 import androidx.compose.runtime.mutableStateOf
@@ -9,7 +10,7 @@ import androidx.lifecycle.viewModelScope
 import kotlinx.coroutines.flow.catch
 import kotlinx.coroutines.launch

-class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() {
+class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instance()): ViewModel() {
     companion object {
         @JvmStatic
         private val NanosPerSecond = 1_000_000_000.0
@@ -28,7 +29,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() {

         viewModelScope.launch {
             try {
-                llm.unload()
+                llamaAndroid.unload()
             } catch (exc: IllegalStateException) {
                 messages += exc.message!!
             }
@@ -44,7 +45,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() {
         messages += ""

         viewModelScope.launch {
-            llm.send(text)
+            llamaAndroid.send(text)
                 .catch {
                     Log.e(tag, "send() failed", it)
                     messages += it.message!!
@@ -57,7 +58,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() {
         viewModelScope.launch {
             try {
                 val start = System.nanoTime()
-                val warmupResult = llm.bench(pp, tg, pl, nr)
+                val warmupResult = llamaAndroid.bench(pp, tg, pl, nr)
                 val end = System.nanoTime()

                 messages += warmupResult
@@ -70,7 +71,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() {
                     return@launch
                 }

-                messages += llm.bench(512, 128, 1, 3)
+                messages += llamaAndroid.bench(512, 128, 1, 3)
             } catch (exc: IllegalStateException) {
                 Log.e(tag, "bench() failed", exc)
                 messages += exc.message!!
@@ -81,7 +82,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() {
     fun load(pathToModel: String) {
         viewModelScope.launch {
             try {
-                llm.load(pathToModel)
+                llamaAndroid.load(pathToModel)
                 messages += "Loaded $pathToModel"
             } catch (exc: IllegalStateException) {
                 Log.e(tag, "load() failed", exc)
@@ -2,4 +2,5 @@
 plugins {
     id("com.android.application") version "8.2.0" apply false
     id("org.jetbrains.kotlin.android") version "1.9.0" apply false
+    id("com.android.library") version "8.2.0" apply false
 }
examples/llama.android/llama/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
+/build
examples/llama.android/llama/CMakeLists.txt (new file, 55 lines)
@@ -0,0 +1,55 @@
+
+# For more information about using CMake with Android Studio, read the
+# documentation: https://d.android.com/studio/projects/add-native-code.html.
+# For more examples on how to use CMake, see https://github.com/android/ndk-samples.
+
+# Sets the minimum CMake version required for this project.
+cmake_minimum_required(VERSION 3.22.1)
+
+# Declares the project name. The project name can be accessed via ${ PROJECT_NAME},
+# Since this is the top level CMakeLists.txt, the project name is also accessible
+# with ${CMAKE_PROJECT_NAME} (both CMake variables are in-sync within the top level
+# build script scope).
+project("llama-android")
+
+## Fetch latest llama.cpp from GitHub
+#include(FetchContent)
+#FetchContent_Declare(
+#        llama
+#        GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
+#        GIT_TAG        master
+#)
+#
+## Also provides "common"
+#FetchContent_MakeAvailable(llama)
+
+# llama.cpp CI uses the code from the current branch
+# ref: https://github.com/ggerganov/llama.cpp/pull/7341#issuecomment-2117617700
+add_subdirectory(../../../../../../ build-llama)
+
+# Creates and names a library, sets it as either STATIC
+# or SHARED, and provides the relative paths to its source code.
+# You can define multiple libraries, and CMake builds them for you.
+# Gradle automatically packages shared libraries with your APK.
+#
+# In this top level CMakeLists.txt, ${CMAKE_PROJECT_NAME} is used to define
+# the target library name; in the sub-module's CMakeLists.txt, ${PROJECT_NAME}
+# is preferred for the same purpose.
+#
+# In order to load a library into your app from Java/Kotlin, you must call
+# System.loadLibrary() and pass the name of the library defined here;
+# for GameActivity/NativeActivity derived applications, the same library name must be
+# used in the AndroidManifest.xml file.
+add_library(${CMAKE_PROJECT_NAME} SHARED
+        # List C/C++ source files with relative paths to this CMakeLists.txt.
+        llama-android.cpp)
+
+# Specifies libraries CMake should link to your target library. You
+# can link libraries from various origins, such as libraries defined in this
+# build script, prebuilt third-party libraries, or Android system libraries.
+target_link_libraries(${CMAKE_PROJECT_NAME}
+        # List libraries link to the target library
+        llama
+        common
+        android
+        log)
examples/llama.android/llama/build.gradle.kts (new file, 68 lines)
@@ -0,0 +1,68 @@
+plugins {
+    id("com.android.library")
+    id("org.jetbrains.kotlin.android")
+}
+
+android {
+    namespace = "android.llama.cpp"
+    compileSdk = 34
+
+    defaultConfig {
+        minSdk = 33
+
+        testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner"
+        consumerProguardFiles("consumer-rules.pro")
+        ndk {
+            // Add NDK properties if wanted, e.g.
+            // abiFilters += listOf("arm64-v8a")
+        }
+        externalNativeBuild {
+            cmake {
+                arguments += "-DCMAKE_BUILD_TYPE=Release"
+                cppFlags += listOf()
+                arguments += listOf()
+
+                cppFlags("")
+            }
+        }
+    }
+
+    buildTypes {
+        release {
+            isMinifyEnabled = false
+            proguardFiles(
+                getDefaultProguardFile("proguard-android-optimize.txt"),
+                "proguard-rules.pro"
+            )
+        }
+    }
+    externalNativeBuild {
+        cmake {
+            path("src/main/cpp/CMakeLists.txt")
+            version = "3.22.1"
+        }
+    }
+    compileOptions {
+        sourceCompatibility = JavaVersion.VERSION_1_8
+        targetCompatibility = JavaVersion.VERSION_1_8
+    }
+    kotlinOptions {
+        jvmTarget = "1.8"
+    }
+
+    packaging {
+        resources {
+            excludes += "/META-INF/{AL2.0,LGPL2.1}"
+        }
+    }
+}
+
+dependencies {
+
+    implementation("androidx.core:core-ktx:1.12.0")
+    implementation("androidx.appcompat:appcompat:1.6.1")
+    implementation("com.google.android.material:material:1.11.0")
+    testImplementation("junit:junit:4.13.2")
+    androidTestImplementation("androidx.test.ext:junit:1.1.5")
+    androidTestImplementation("androidx.test.espresso:espresso-core:3.5.1")
+}
examples/llama.android/llama/consumer-rules.pro (new empty file)

examples/llama.android/llama/proguard-rules.pro (new file, vendored, 21 lines)
@@ -0,0 +1,21 @@
+# Add project specific ProGuard rules here.
+# You can control the set of applied configuration files using the
+# proguardFiles setting in build.gradle.
+#
+# For more details, see
+#   http://developer.android.com/guide/developing/tools/proguard.html
+
+# If your project uses WebView with JS, uncomment the following
+# and specify the fully qualified class name to the JavaScript interface
+# class:
+#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
+#   public *;
+#}
+
+# Uncomment this to preserve the line number information for
+# debugging stack traces.
+#-keepattributes SourceFile,LineNumberTable
+
+# If you keep the line number information, uncomment this to
+# hide the original source file name.
+#-renamesourcefileattribute SourceFile
@@ -0,0 +1,24 @@
+package android.llama.cpp
+
+import androidx.test.platform.app.InstrumentationRegistry
+import androidx.test.ext.junit.runners.AndroidJUnit4
+
+import org.junit.Test
+import org.junit.runner.RunWith
+
+import org.junit.Assert.*
+
+/**
+ * Instrumented test, which will execute on an Android device.
+ *
+ * See [testing documentation](http://d.android.com/tools/testing).
+ */
+@RunWith(AndroidJUnit4::class)
+class ExampleInstrumentedTest {
+    @Test
+    fun useAppContext() {
+        // Context of the app under test.
+        val appContext = InstrumentationRegistry.getInstrumentation().targetContext
+        assertEquals("android.llama.cpp.test", appContext.packageName)
+    }
+}
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android">
+
+</manifest>
@@ -1,4 +1,3 @@
-
 # For more information about using CMake with Android Studio, read the
 # documentation: https://d.android.com/studio/projects/add-native-code.html.
 # For more examples on how to use CMake, see https://github.com/android/ndk-samples.
@@ -81,7 +81,7 @@ static void log_callback(ggml_log_level level, const char * fmt, void * data) {

 extern "C"
 JNIEXPORT jlong JNICALL
-Java_com_example_llama_Llm_load_1model(JNIEnv *env, jobject, jstring filename) {
+Java_android_llama_cpp_LLamaAndroid_load_1model(JNIEnv *env, jobject, jstring filename) {
     llama_model_params model_params = llama_model_default_params();

     auto path_to_model = env->GetStringUTFChars(filename, 0);
@@ -101,13 +101,13 @@ Java_com_example_llama_Llm_load_1model(JNIEnv *env, jobject, jstring filename) {

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_free_1model(JNIEnv *, jobject, jlong model) {
+Java_android_llama_cpp_LLamaAndroid_free_1model(JNIEnv *, jobject, jlong model) {
     llama_free_model(reinterpret_cast<llama_model *>(model));
 }

 extern "C"
 JNIEXPORT jlong JNICALL
-Java_com_example_llama_Llm_new_1context(JNIEnv *env, jobject, jlong jmodel) {
+Java_android_llama_cpp_LLamaAndroid_new_1context(JNIEnv *env, jobject, jlong jmodel) {
     auto model = reinterpret_cast<llama_model *>(jmodel);

     if (!model) {
@@ -139,25 +139,25 @@ Java_com_example_llama_Llm_new_1context(JNIEnv *env, jobject, jlong jmodel) {

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_free_1context(JNIEnv *, jobject, jlong context) {
+Java_android_llama_cpp_LLamaAndroid_free_1context(JNIEnv *, jobject, jlong context) {
     llama_free(reinterpret_cast<llama_context *>(context));
 }

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_backend_1free(JNIEnv *, jobject) {
+Java_android_llama_cpp_LLamaAndroid_backend_1free(JNIEnv *, jobject) {
     llama_backend_free();
 }

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_log_1to_1android(JNIEnv *, jobject) {
+Java_android_llama_cpp_LLamaAndroid_log_1to_1android(JNIEnv *, jobject) {
     llama_log_set(log_callback, NULL);
 }

 extern "C"
 JNIEXPORT jstring JNICALL
-Java_com_example_llama_Llm_bench_1model(
+Java_android_llama_cpp_LLamaAndroid_bench_1model(
         JNIEnv *env,
         jobject,
         jlong context_pointer,
@@ -271,13 +271,13 @@ Java_com_example_llama_Llm_bench_1model(

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_free_1batch(JNIEnv *, jobject, jlong batch_pointer) {
+Java_android_llama_cpp_LLamaAndroid_free_1batch(JNIEnv *, jobject, jlong batch_pointer) {
     llama_batch_free(*reinterpret_cast<llama_batch *>(batch_pointer));
 }

 extern "C"
 JNIEXPORT jlong JNICALL
-Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint embd, jint n_seq_max) {
+Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens, jint embd, jint n_seq_max) {

     // Source: Copy of llama.cpp:llama_batch_init but heap-allocated.

@@ -313,19 +313,19 @@ Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint emb

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject) {
+Java_android_llama_cpp_LLamaAndroid_backend_1init(JNIEnv *, jobject) {
     llama_backend_init();
 }

 extern "C"
 JNIEXPORT jstring JNICALL
-Java_com_example_llama_Llm_system_1info(JNIEnv *env, jobject) {
+Java_android_llama_cpp_LLamaAndroid_system_1info(JNIEnv *env, jobject) {
     return env->NewStringUTF(llama_print_system_info());
 }

 extern "C"
 JNIEXPORT jint JNICALL
-Java_com_example_llama_Llm_completion_1init(
+Java_android_llama_cpp_LLamaAndroid_completion_1init(
         JNIEnv *env,
         jobject,
         jlong context_pointer,
@@ -376,7 +376,7 @@ Java_com_example_llama_Llm_completion_1init(

 extern "C"
 JNIEXPORT jstring JNICALL
-Java_com_example_llama_Llm_completion_1loop(
+Java_android_llama_cpp_LLamaAndroid_completion_1loop(
         JNIEnv * env,
         jobject,
         jlong context_pointer,
@@ -438,6 +438,6 @@ Java_com_example_llama_Llm_completion_1loop(

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_kv_1cache_1clear(JNIEnv *, jobject, jlong context) {
+Java_android_llama_cpp_LLamaAndroid_kv_1cache_1clear(JNIEnv *, jobject, jlong context) {
     llama_kv_cache_clear(reinterpret_cast<llama_context *>(context));
 }
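The JNI symbols above change because the Kotlin binding moved from `com.example.llama.Llm` to `android.llama.cpp.LLamaAndroid`: JNI resolves a native method from the fully qualified class name, mapping `.` to `_` and escaping a literal underscore in the method name as `_1` (which is why `load_model` appears as `load_1model`). The sketch below is an editorial illustration of that convention, not code from the example; the Kotlin declaration in the comment and the placeholder body are assumptions.

```cpp
// Hypothetical, standalone illustration of the JNI naming rule used by the renamed exports above.
// Assumed Kotlin side:  package android.llama.cpp
//                       class LLamaAndroid { private external fun system_info(): String }
#include <jni.h>

extern "C" JNIEXPORT jstring JNICALL
Java_android_llama_cpp_LLamaAndroid_system_1info(JNIEnv * env, jobject /* thiz */) {
    // The real binding returns llama_print_system_info(); a fixed string keeps this sketch self-contained.
    return env->NewStringUTF("placeholder system info");
}
```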
@@ -1,4 +1,4 @@
-package com.example.llama
+package android.llama.cpp

 import android.util.Log
 import kotlinx.coroutines.CoroutineDispatcher
@@ -10,7 +10,7 @@ import kotlinx.coroutines.withContext
 import java.util.concurrent.Executors
 import kotlin.concurrent.thread

-class Llm {
+class LLamaAndroid {
     private val tag: String? = this::class.simpleName

     private val threadLocalState: ThreadLocal<State> = ThreadLocal.withInitial { State.Idle }
@@ -165,8 +165,8 @@ class Llm {
        }

        // Enforce only one instance of Llm.
-       private val _instance: Llm = Llm()
+       private val _instance: LLamaAndroid = LLamaAndroid()

-       fun instance(): Llm = _instance
+       fun instance(): LLamaAndroid = _instance
    }
 }
@@ -0,0 +1,17 @@
+package android.llama.cpp
+
+import org.junit.Test
+
+import org.junit.Assert.*
+
+/**
+ * Example local unit test, which will execute on the development machine (host).
+ *
+ * See [testing documentation](http://d.android.com/tools/testing).
+ */
+class ExampleUnitTest {
+    @Test
+    fun addition_isCorrect() {
+        assertEquals(4, 2 + 2)
+    }
+}
@@ -15,3 +15,4 @@ dependencyResolutionManagement {

 rootProject.name = "LlamaAndroid"
 include(":app")
+include(":llama")
@@ -54,10 +54,10 @@ python ./examples/llava/convert-image-encoder-to-gguf \
     --projector-type ldpv2
 ```

-4. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF:
+4. Use `examples/convert-legacy-llama.py` to convert the LLaMA part of LLaVA to GGUF:

 ```sh
-python ./convert.py path/to/MobileVLM-1.7B
+python ./examples/convert-legacy-llama.py path/to/MobileVLM-1.7B
 ```

 5. Use `quantize` to convert LLaMA part's DataType from `fp16` to `q4_k`
@@ -50,10 +50,10 @@ python ./examples/llava/llava-surgery.py -m ../llava-v1.5-7b
 python ./examples/llava/convert-image-encoder-to-gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
 ```

-5. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF:
+5. Use `examples/convert-legacy-llama.py` to convert the LLaMA part of LLaVA to GGUF:

 ```sh
-python ./convert.py ../llava-v1.5-7b --skip-unknown
+python ./examples/convert-legacy-llama.py ../llava-v1.5-7b --skip-unknown
 ```

 Now both the LLaMA part and the image encoder are in the `llava-v1.5-7b` directory.
@@ -92,7 +92,7 @@ python ./examples/llava/convert-image-encoder-to-gguf.py -m vit --llava-projecto

 6) Then convert the model to gguf format:
 ```console
-python ./convert.py ../llava-v1.6-vicuna-7b/ --skip-unknown
+python ./examples/convert-legacy-llama.py ../llava-v1.6-vicuna-7b/ --skip-unknown
 ```

 7) And finally we can run the llava-cli using the 1.6 model version:
@@ -104,6 +104,7 @@ static std::string format(const char * fmt, ...) {
 #define TN_POS_EMBD "%s.position_embd.weight"
 #define TN_CLASS_EMBD "v.class_embd"
 #define TN_PATCH_EMBD "v.patch_embd.weight"
+#define TN_PATCH_BIAS "v.patch_embd.bias"
 #define TN_ATTN_K "%s.blk.%d.attn_k.%s"
 #define TN_ATTN_Q "%s.blk.%d.attn_q.%s"
 #define TN_ATTN_V "%s.blk.%d.attn_v.%s"
@@ -425,6 +426,7 @@ struct clip_vision_model {
     // embeddings
     struct ggml_tensor * class_embedding;
     struct ggml_tensor * patch_embeddings;
+    struct ggml_tensor * patch_bias;
     struct ggml_tensor * position_embeddings;

     struct ggml_tensor * pre_ln_w;
@@ -501,6 +503,11 @@ struct clip_ctx {
     bool use_gelu = false;
     int32_t ftype = 1;

+    bool has_class_embedding = true;
+    bool has_pre_norm = true;
+    bool has_post_norm = false;
+    bool has_patch_bias = false;
+
     struct gguf_context * ctx_gguf;
     struct ggml_context * ctx_data;

@@ -526,7 +533,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
     const int patch_size = hparams.patch_size;
     const int num_patches = ((image_size / patch_size) * (image_size / patch_size));
     const int num_patches_per_side = image_size / patch_size; GGML_UNUSED(num_patches_per_side);
-    const int num_positions = num_patches + 1;
+    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
     const int hidden_size = hparams.hidden_size;
     const int n_head = hparams.n_head;
     const int d_head = hidden_size / n_head;
@@ -557,16 +564,23 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
     inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
     inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));

+    if (ctx->has_patch_bias) {
+        // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
+        inp = ggml_add(ctx0, inp, model.patch_bias);
+    }
+
     // concat class_embeddings and patch_embeddings
-    struct ggml_tensor * embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
+    struct ggml_tensor * embeddings = inp;
+    if (ctx->has_class_embedding) {
+        embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
     ggml_set_name(embeddings, "embeddings");
     ggml_set_input(embeddings);

     embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
             embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);

     embeddings = ggml_acc(ctx0, embeddings, inp,
             embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
+    }


     struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
     ggml_set_name(positions, "positions");
@@ -576,7 +590,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));

     // pre-layernorm
-    {
+    if (ctx->has_pre_norm) {
         embeddings = ggml_norm(ctx0, embeddings, eps);
         ggml_set_name(embeddings, "pre_ln");
@@ -664,6 +678,14 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         embeddings = cur;
     }

+    // post-layernorm
+    if (ctx->has_post_norm) {
+        embeddings = ggml_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "post_ln");
+
+        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
+    }
+
     // llava projector
     {
         embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
@@ -1149,11 +1171,38 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     }

     try {
-        vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
         vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
-        vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
+        new_clip->has_class_embedding = true;
+    } catch (const std::exception& e) {
+        new_clip->has_class_embedding = false;
+    }
+
+    try {
         vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
         vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
+        new_clip->has_pre_norm = true;
+    } catch (std::exception & e) {
+        new_clip->has_pre_norm = false;
+    }
+
+    try {
+        vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
+        vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
+        new_clip->has_post_norm = true;
+    } catch (std::exception & e) {
+        new_clip->has_post_norm = false;
+    }
+
+    try {
+        vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
+        new_clip->has_patch_bias = true;
+    } catch (std::exception & e) {
+        new_clip->has_patch_bias = false;
+    }
+
+    try {
+        vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
+        vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
     } catch(const std::exception& e) {
         LOG_TEE("%s: failed to load vision model tensors\n", __func__);
     }
@@ -1797,7 +1846,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     const int image_size = hparams.image_size;
     const int patch_size = hparams.patch_size;
     const int num_patches = ((image_size / patch_size) * (image_size / patch_size));
-    const int num_positions = num_patches + 1;
+    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);

     {
         struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
@@ -1825,6 +1874,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     }

     {
+        if (ctx->has_class_embedding) {
         struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");

         void* zero_mem = malloc(ggml_nbytes(embeddings));
@@ -1832,6 +1882,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
         free(zero_mem);
     }
+    }

     {
         struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
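The loader above treats the class embedding, pre/post layernorm and patch bias as optional by probing each tensor in its own try/catch and recording a `has_*` flag. The same pattern could be factored into a small helper; the sketch below is hypothetical (it assumes clip.cpp's internal `get_tensor(ggml_context *, const std::string &)`, which throws when a tensor is absent) and is not part of the commit.

```cpp
// Hypothetical helper mirroring the try/catch probing pattern above: fetch a tensor
// if it exists, record whether it was found, and return nullptr otherwise.
static ggml_tensor * get_tensor_optional(ggml_context * ctx, const std::string & name, bool & found) {
    try {
        ggml_tensor * t = get_tensor(ctx, name); // assumed to throw when the tensor is missing
        found = true;
        return t;
    } catch (const std::exception & /* e */) {
        found = false;
        return nullptr;
    }
}
```

With such a helper the patch-bias probe, for example, would reduce to `vision_model.patch_bias = get_tensor_optional(new_clip->ctx_data, TN_PATCH_BIAS, new_clip->has_patch_bias);`.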
@@ -68,7 +68,7 @@ CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8
 /** interpret bytes as an image file with length bytes_length, and use the result to populate img */
 CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);

-/** preprocess img and store the result in res_imgs, pad_to_square may be overriden to false depending on model configuration */
+/** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */
 CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );

 CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);
@@ -189,6 +189,11 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
     LOG_TEE("\n");

     struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
+    if (!ctx_sampling) {
+        fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
+        exit(1);
+    }
+
     std::string response = "";
     for (int i = 0; i < max_tgt_len; i++) {
         const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
@@ -285,7 +290,7 @@ int main(int argc, char ** argv) {
 #endif // LOG_DISABLE_LOGS

     if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
-        gpt_print_usage(argc, argv, params);
+        gpt_params_print_usage(argc, argv, params);
         show_additional_info(argc, argv);
         return 1;
     }
@@ -295,6 +300,19 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    if (prompt_contains_image(params.prompt)) {
+        auto ctx_llava = llava_init_context(&params, model);
+
+        auto image_embed = load_image(ctx_llava, &params, "");
+
+        // process the prompt
+        process_prompt(ctx_llava, image_embed, &params, params.prompt);
+
+        llama_print_timings(ctx_llava->ctx_llama);
+        llava_image_embed_free(image_embed);
+        ctx_llava->model = NULL;
+        llava_free(ctx_llava);
+    } else {
     for (auto & image : params.image) {
         auto ctx_llava = llava_init_context(&params, model);
@@ -312,6 +330,8 @@ int main(int argc, char ** argv) {
         ctx_llava->model = NULL;
         llava_free(ctx_llava);
     }
+    }

     llama_free_model(model);

     return 0;
@@ -88,7 +88,6 @@ static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair<
 // Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out)
 static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *> & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out) {
     struct {
-        struct ggml_tensor * newline;
         struct ggml_context * ctx;
     } model;

@@ -150,20 +149,6 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *>

     model.ctx = ggml_init(params);

-    ggml_tensor * newline_tmp = clip_get_newline_tensor(ctx_clip);
-    model.newline = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, newline_tmp->ne[0]);
-    if (newline_tmp->backend != GGML_BACKEND_TYPE_CPU) {
-        if (newline_tmp->buffer == NULL) {
-            LOG_TEE("newline_tmp tensor buffer is NULL\n");
-        }
-        ggml_backend_tensor_get(newline_tmp, model.newline->data, 0, ggml_nbytes(newline_tmp));
-    } else {
-        model.newline->data = newline_tmp->data;
-        if (model.newline->data == NULL) {
-            LOG_TEE("newline_tmp tensor data is NULL\n");
-        }
-    }
-
     struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_patches(ctx_clip), num_images - 1); // example: 4096 x 576 x 4
     // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false);
     // fill it with the image embeddings, ignoring the base
@@ -1,3 +1,3 @@
--r ../../requirements/requirements-convert.txt
+-r ../../requirements/requirements-convert-legacy-llama.txt
 pillow~=10.2.0
 torch~=2.1.1
@@ -174,7 +174,7 @@ int main(int argc, char ** argv) {
         // debug
         if (dump_kv_cache) {
             llama_kv_cache_view_update(ctx, &kvc_view);
-            dump_kv_cache_view_seqs(kvc_view, 40);
+            llama_kv_cache_dump_view_seqs(kvc_view, 40);
         }

         // build the mask from https://lmsys.org/blog/2023-11-21-lookahead-decoding/
@@ -121,7 +121,7 @@ int main(int argc, char ** argv){
         // debug
         if (dump_kv_cache) {
             llama_kv_cache_view_update(ctx, &kvc_view);
-            dump_kv_cache_view_seqs(kvc_view, 40);
+            llama_kv_cache_dump_view_seqs(kvc_view, 40);
         }

         // print current draft sequence
@@ -325,3 +325,5 @@ These options provide extra functionality and customization when running the LLa
 - `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance.
 - `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
 - `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
+
+- `-hfr URL --hf-repo URL`: The url to the Hugging Face model repository. Used in conjunction with `--hf-file` or `-hff`. The model is downloaded and stored in the file provided by `-m` or `--model`. If `-m` is not provided, the model is auto-stored in the path specified by the `LLAMA_CACHE` environment variable or in an OS-specific local cache.
@@ -60,9 +60,9 @@ static void write_logfile(
         return;
     }

-    const std::string timestamp = get_sortable_timestamp();
+    const std::string timestamp = string_get_sortable_timestamp();

-    const bool success = create_directory_with_parents(params.logdir);
+    const bool success = fs_create_directory_with_parents(params.logdir);
     if (!success) {
         fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
                 __func__, params.logdir.c_str());
@@ -80,7 +80,7 @@ static void write_logfile(
     fprintf(logfile, "binary: main\n");
     char model_desc[128];
     llama_model_desc(model, model_desc, sizeof(model_desc));
-    dump_non_result_info_yaml(logfile, params, ctx, timestamp, input_tokens, model_desc);
+    yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc);

     fprintf(logfile, "\n");
     fprintf(logfile, "######################\n");
@@ -88,8 +88,8 @@ static void write_logfile(
     fprintf(logfile, "######################\n");
     fprintf(logfile, "\n");

-    dump_string_yaml_multiline(logfile, "output", output.c_str());
-    dump_vector_int_yaml(logfile, "output_tokens", output_tokens);
+    yaml_dump_string_multiline(logfile, "output", output.c_str());
+    yaml_dump_vector_int(logfile, "output_tokens", output_tokens);

     llama_dump_timing_info_yaml(logfile, ctx);
     fclose(logfile);
@@ -181,7 +181,7 @@ int main(int argc, char ** argv) {

     std::mt19937 rng(params.seed);
     if (params.random_prompt) {
-        params.prompt = gpt_random_prompt(rng);
+        params.prompt = string_random_prompt(rng);
     }

     LOG("%s: llama backend init\n", __func__);
@@ -219,7 +219,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         LOG_TEE("\n");
-        LOG_TEE("%s\n", get_system_info(params).c_str());
+        LOG_TEE("%s\n", gpt_params_get_system_info(params).c_str());
     }

     std::string path_session = params.path_prompt_cache;
@@ -474,12 +474,12 @@ int main(int argc, char ** argv) {
     LOG_TEE("\n\n");

     if (params.interactive) {
-        const char *control_message;
+        const char * control_message;
         if (params.multiline_input) {
-            control_message = " - To return control to LLaMa, end your input with '\\'.\n"
+            control_message = " - To return control to the AI, end your input with '\\'.\n"
                               " - To return control without starting a new line, end your input with '/'.\n";
         } else {
-            control_message = " - Press Return to return control to LLaMa.\n"
+            control_message = " - Press Return to return control to the AI.\n"
                               " - To return control without starting a new line, end your input with '/'.\n"
                               " - If you want to submit another line, end your input with '\\'.\n";
         }
@@ -523,6 +523,10 @@ int main(int argc, char ** argv) {
     }

     struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
+    if (!ctx_sampling) {
+        fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
+        exit(1);
+    }

     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
@@ -703,7 +707,7 @@ int main(int argc, char ** argv) {

             const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);

-            llama_sampling_accept(ctx_sampling, ctx, id, true);
+            llama_sampling_accept(ctx_sampling, ctx, id, /* apply_grammar= */ true);

             LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());

@@ -724,7 +728,7 @@ int main(int argc, char ** argv) {

                 // push the prompt in the sampling context in order to apply repetition penalties later
                 // for the prompt, we don't apply grammar rules
-                llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], false);
+                llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], /* apply_grammar= */ false);

                 ++n_consumed;
                 if ((int) embd.size() >= params.n_batch) {
@@ -736,18 +740,26 @@ int main(int argc, char ** argv) {
         // display text
         if (input_echo && display) {
             for (auto id : embd) {
-                const std::string token_str = llama_token_to_piece(ctx, id, !params.conversation);
-                printf("%s", token_str.c_str());
+                const std::string token_str = llama_token_to_piece(ctx, id, params.special);

+                // Console/Stream Output
+                fprintf(stdout, "%s", token_str.c_str());
+
+                // Record Displayed Tokens To Log
+                // Note: Generated tokens are created one by one hence this check
                 if (embd.size() > 1) {
+                    // Incoming Requested Tokens
                     input_tokens.push_back(id);
                 } else {
+                    // Outgoing Generated Tokens
                     output_tokens.push_back(id);
                     output_ss << token_str;
                 }
-            }
                 fflush(stdout);
             }
+        }

         // reset color to default if there is no pending user input
         if (input_echo && (int) embd_inp.size() == n_consumed) {
             console::set_display(console::reset);
@@ -875,11 +887,11 @@ int main(int argc, char ** argv) {
             embd_inp.insert(embd_inp.end(), cml_pfx.begin(), cml_pfx.end());
         }
         if (params.escape) {
-            process_escapes(buffer);
+            string_process_escapes(buffer);
         }

         const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-        const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
+        const auto line_inp = ::llama_tokenize(ctx, buffer, false, params.interactive_specials);
         const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

         LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
@@ -1,98 +0,0 @@
-#!/usr/bin/env python3
-"""
-This script converts Hugging Face Llama, StarCoder, Falcon, Baichuan, and GPT-NeoX models to GGUF and quantizes them.
-
-Usage:
-python make-ggml.py {model_dir_or_hf_repo_name} --model_type {model_type} [--outname {output_name} (Optional)] [--outdir {output_directory} (Optional)] [--quants {quant_types} (Optional)] [--keep_fp16 (Optional)]
-
-Arguments:
-- model: (Required) The directory of the downloaded Hugging Face model or the name of the Hugging Face model repository. If the model directory does not exist, it will be downloaded from the Hugging Face model hub.
-- --model_type: (Required) The type of the model to be converted. Choose from llama, starcoder, falcon, baichuan, or gptneox.
-- --outname: (Optional) The name of the output model. If not specified, the last part of the model directory path or the Hugging Face model repo name will be used.
-- --outdir: (Optional) The directory where the output model(s) will be stored. If not specified, '../models/{outname}' will be used.
-- --quants: (Optional) The types of quantization to apply. This should be a space-separated list. The default is 'Q4_K_M Q5_K_S'.
-- --keep_fp16: (Optional) If specified, the FP16 model will not be deleted after the quantized models are created.
-
-Old quant types (some base model types require these):
-- Q4_0: small, very high quality loss - legacy, prefer using Q3_K_M
-- Q4_1: small, substantial quality loss - legacy, prefer using Q3_K_L
-- Q5_0: medium, balanced quality - legacy, prefer using Q4_K_M
-- Q5_1: medium, low quality loss - legacy, prefer using Q5_K_M
-
-New quant types (recommended):
-- Q2_K: smallest, extreme quality loss - not recommended
-- Q3_K: alias for Q3_K_M
-- Q3_K_S: very small, very high quality loss
-- Q3_K_M: very small, very high quality loss
-- Q3_K_L: small, substantial quality loss
-- Q4_K: alias for Q4_K_M
-- Q4_K_S: small, significant quality loss
-- Q4_K_M: medium, balanced quality - recommended
-- Q5_K: alias for Q5_K_M
-- Q5_K_S: large, low quality loss - recommended
-- Q5_K_M: large, very low quality loss - recommended
-- Q6_K: very large, extremely low quality loss
-- Q8_0: very large, extremely low quality loss - not recommended
-- F16: extremely large, virtually no quality loss - not recommended
-- F32: absolutely huge, lossless - not recommended
-"""
-import subprocess
-subprocess.run(f"pip install huggingface-hub==0.16.4", shell=True, check=True)
-
-import argparse
-import os
-from huggingface_hub import snapshot_download
-
-def main(model, model_type, outname, outdir, quants, keep_fp16):
-    if not os.path.isdir(model):
-        print(f"Model not found at {model}. Downloading...")
-        try:
-            if outname is None:
-                outname = model.split('/')[-1]
-            model = snapshot_download(repo_id=model, cache_dir='../models/hf_cache')
-        except Exception as e:
-            raise Exception(f"Could not download the model: {e}")
-
-    if outdir is None:
-        outdir = f'../models/{outname}'
-
-    if not os.path.isfile(f"{model}/config.json"):
-        raise Exception(f"Could not find config.json in {model}")
-
-    os.makedirs(outdir, exist_ok=True)
-
-    print("Building llama.cpp")
-    subprocess.run(f"cd .. && make quantize", shell=True, check=True)
-
-    fp16 = f"{outdir}/{outname}.gguf.fp16.bin"
-
-    print(f"Making unquantised GGUF at {fp16}")
-    if not os.path.isfile(fp16):
-        if model_type != "llama":
-            subprocess.run(f"python3 ../convert-{model_type}-hf-to-gguf.py {model} 1 --outfile {fp16}", shell=True, check=True)
-        else:
-            subprocess.run(f"python3 ../convert.py {model} --outtype f16 --outfile {fp16}", shell=True, check=True)
-    else:
-        print(f"Unquantised GGML already exists at: {fp16}")
-
-    print("Making quants")
-    for type in quants:
-        outfile = f"{outdir}/{outname}.gguf.{type}.bin"
-        print(f"Making {type} : {outfile}")
-        subprocess.run(f"../quantize {fp16} {outfile} {type}", shell=True, check=True)
-
-    if not keep_fp16:
-        os.remove(fp16)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Convert/Quantize HF models to GGUF. If you have the HF model downloaded already, pass the path to the model dir. Otherwise, pass the Hugging Face model repo name. You need to be in the /examples folder for it to work.')
-    parser.add_argument('model', help='Downloaded model dir or Hugging Face model repo name')
-    parser.add_argument('--model_type', required=True, choices=['llama', 'starcoder', 'falcon', 'baichuan', 'gptneox'], help='Type of the model to be converted. Choose from llama, starcoder, falcon, baichuan, or gptneox.')
-    parser.add_argument('--outname', default=None, help='Output model(s) name')
-    parser.add_argument('--outdir', default=None, help='Output directory')
-    parser.add_argument('--quants', nargs='*', default=["Q4_K_M", "Q5_K_S"], help='Quant types')
-    parser.add_argument('--keep_fp16', action='store_true', help='Keep fp16 model', default=False)
-
-    args = parser.parse_args()
-
-    main(args.model, args.model_type, args.outname, args.outdir, args.quants, args.keep_fp16)
@@ -210,7 +210,7 @@ int main(int argc, char ** argv) {
     while (true) {
         if (dump_kv_cache) {
             llama_kv_cache_view_update(ctx, &kvc_view);
-            dump_kv_cache_view_seqs(kvc_view, 40);
+            llama_kv_cache_dump_view_seqs(kvc_view, 40);
         }

         llama_batch_clear(batch);
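This is the same rename picked up by the lookahead and lookup examples above: the KV-cache debug dump helpers in `common` gained a `llama_` prefix. A minimal usage sketch under the new name follows; the update and dump calls are exactly the ones shown in the hunks, while `llama_kv_cache_view_init`/`llama_kv_cache_view_free` are assumed from `llama.h` of this revision.

```cpp
// Sketch only: periodically dump which sequences occupy the KV cache while decoding.
#include "common.h"
#include "llama.h"

static void debug_dump_kv_cache(llama_context * ctx, int32_t n_seq_max) {
    // assumed llama.h API: build a view over the KV cache for up to n_seq_max sequences
    llama_kv_cache_view kvc_view = llama_kv_cache_view_init(ctx, n_seq_max);

    llama_kv_cache_view_update(ctx, &kvc_view);   // refresh the snapshot
    llama_kv_cache_dump_view_seqs(kvc_view, 40);  // renamed helper from common

    llama_kv_cache_view_free(&kvc_view);
}
```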
@@ -7,6 +7,8 @@ Also note that finetunes typically result in a higher perplexity value even thou

 Within llama.cpp the perplexity of base models is used primarily to judge the quality loss from e.g. quantized models vs. FP16.
 The convention among contributors is to use the Wikitext-2 test set for testing unless noted otherwise (can be obtained with `scripts/get-wikitext-2.sh`).
+When numbers are listed all command line arguments and compilation options are left at their defaults unless noted otherwise.
+llama.cpp numbers are **not** directly comparable to those of other projects because the exact values depend strongly on the implementation details.

 By default only the mean perplexity value and the corresponding uncertainty is calculated.
 The uncertainty is determined empirically by assuming a Gaussian distribution of the "correct" logits per and then applying error propagation.
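As an editorial note on the "mean perplexity value and the corresponding uncertainty" referenced in the context above, the usual estimator looks as follows; this sketches the standard definitions and is not a claim about the exact formula implemented by the `perplexity` tool.

```
\mu = -\frac{1}{N}\sum_{i=1}^{N} \ln p(x_i \mid x_{<i}), \qquad \mathrm{PPL} = e^{\mu},
\qquad \sigma_{\mu} = \frac{\sigma_{\ln p}}{\sqrt{N}}, \qquad
\sigma_{\mathrm{PPL}} \approx \mathrm{PPL}\cdot\sigma_{\mu}
```

Here the per-token negative log-probabilities are treated as independent draws, their empirical standard deviation gives the standard error of the mean, and error propagation through the exponential yields the reported uncertainty on the perplexity.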
||||||
@@ -32,12 +34,21 @@ In addition to the KL divergence the following statistics are calculated with `--kl-divergence`:

 ## LLaMA 3 8b Scoreboard

-Results are sorted by Kullback-Leibler divergence relative to FP16.
+| Revision | f364eb6f |
+|:---------|:-------------------|
+| Backend | CUDA |
+| CPU | AMD Epyc 7742 |
+| GPU | 1x NVIDIA RTX 4090 |
+
+Results were generated using the CUDA backend and are sorted by Kullback-Leibler divergence relative to FP16.
 The "WT" importance matrices were created using varying numbers of Wikitext tokens and can be found [here](https://huggingface.co/JohannesGaessler/llama.cpp_importance_matrices/blob/main/imatrix-llama_3-8b-f16-2.7m_tokens.dat).
+Note: the FP16 logits used for the calculation of all metrics other than perplexity are stored in a binary file between runs.
+In order to save space this file does **not** contain the exact same FP32 logits but instead casts them to 16 bit unsigned integers (with some scaling).
+So the "f16" results are to be understood as the difference resulting only from this downcast.

 | Quantization | imatrix | Model size [GiB] | PPL | ΔPPL | KLD | Mean Δp | RMS Δp |
 |--------------|---------|------------------|------------------------|------------------------|-----------------------|-------------------|------------------|
-| f16 | None | 14.97 | 6.233160 ± 0.037828 | - | - | - | - |
+| f16 | None | 14.97 | 6.233160 ± 0.037828 | 0.001524 ± 0.000755 | 0.000551 ± 0.000002 | 0.001 ± 0.002 % | 0.787 ± 0.004 % |
 | q8_0 | None | 7.96 | 6.234284 ± 0.037878 | 0.002650 ± 0.001006 | 0.001355 ± 0.000006 | -0.019 ± 0.003 % | 1.198 ± 0.007 % |
 | q6_K | None | 6.14 | 6.253382 ± 0.038078 | 0.021748 ± 0.001852 | 0.005452 ± 0.000035 | -0.007 ± 0.006 % | 2.295 ± 0.019 % |
 | q5_K_M | None | 5.33 | 6.288607 ± 0.038338 | 0.056974 ± 0.002598 | 0.010762 ± 0.000079 | -0.114 ± 0.008 % | 3.160 ± 0.031 % |
@@ -89,6 +100,12 @@ K-quants score better on mean Δp than the legacy quants than e.g. KL divergence

 ## LLaMA 2 vs. LLaMA 3 Quantization comparison

+| Revision | f364eb6f |
+|:---------|:-------------------|
+| Backend | CUDA |
+| CPU | AMD Epyc 7742 |
+| GPU | 1x NVIDIA RTX 4090 |
+
 | Metric | L2 7b q2_K | L3 8b q2_K | L2 7b q4_K_M | L3 8b q4_K_M | L2 7b q6_K | L3 8b q6_K | L2 7b q8_0 | L3 8b q8_0 |
 |-----------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|
 | Mean PPL | 5.794552 ± 0.032298 | 9.751568 ± 0.063312 | 5.877078 ± 0.032781 | 6.407115 ± 0.039119 | 5.808494 ± 0.032425 | 6.253382 ± 0.038078 | 5.798542 ± 0.032366 | 6.234284 ± 0.037878 |
@@ -107,6 +124,50 @@ K-quants score better on mean Δp than the legacy quants than e.g. KL divergence
 | RMS Δp | 9.762 ± 0.053 % | 21.421 ± 0.079 % | 3.252 ± 0.024 % | 5.519 ± 0.050 % | 1.339 ± 0.010 % | 2.295 ± 0.019 % | 0.618 ± 0.011 % | 1.198 ± 0.007 % |
 | Same top p | 85.584 ± 0.086 % | 71.138 ± 0.119 % | 94.665 ± 0.055 % | 91.901 ± 0.072 % | 97.520 ± 0.038 % | 96.031 ± 0.051 % | 98.846 ± 0.026 % | 97.674 ± 0.040 % |

+## LLaMA 3 BF16 vs. FP16 comparison
+
+| Revision | 83330d8c |
+|:---------|:--------------|
+| Backend | CPU |
+| CPU | AMD Epyc 7742 |
+| GPU | N/A |
+
+Results were calculated with LLaMA 3 8b BF16 as `--kl-divergence-base` and LLaMA 3 8b FP16 as the `--model` for comparison.
+
+| Metric | Value |
+|--------------------------------|--------------------------|
+| Mean PPL(Q) | 6.227711 ± 0.037833 |
+| Mean PPL(base) | 6.225194 ± 0.037771 |
+| Cor(ln(PPL(Q)), ln(PPL(base))) | 99.990% |
+| Mean ln(PPL(Q)/PPL(base)) | 0.000404 ± 0.000086 |
+| Mean PPL(Q)/PPL(base) | 1.000404 ± 0.000086 |
+| Mean PPL(Q)-PPL(base) | 0.002517 ± 0.000536 |
+| Mean KLD | 0.00002515 ± 0.00000020 |
+| Maximum KLD | 0.012206 |
+| 99.9% KLD | 0.000799 |
+| 99.0% KLD | 0.000222 |
+| 99.0% KLD | 0.000222 |
+| Median KLD | 0.000013 |
+| 10.0% KLD | -0.000002 |
+| 5.0% KLD | -0.000008 |
+| 1.0% KLD | -0.000023 |
+| Minimum KLD | -0.000059 |
+| Mean Δp | -0.0000745 ± 0.0003952 % |
+| Maximum Δp | 4.186% |
+| 99.9% Δp | 1.049% |
+| 99.0% Δp | 0.439% |
+| 95.0% Δp | 0.207% |
+| 90.0% Δp | 0.125% |
+| 75.0% Δp | 0.029% |
+| Median Δp | 0.000% |
+| 25.0% Δp | -0.030% |
+| 10.0% Δp | -0.126% |
+| 5.0% Δp | -0.207% |
+| 1.0% Δp | -0.434% |
+| 0.1% Δp | -1.016% |
+| Minimum Δp | -4.672% |
+| RMS Δp | 0.150 ± 0.001 % |
+| Same top p | 99.739 ± 0.013 % |
+
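
A sketch of the workflow behind the table above (flag names are taken from the text above; file names, and the assumption that the base logits file is written on the first run, are illustrative and not part of the diff):

```bash
# first run stores the BF16 base logits, second run compares FP16 against them
./perplexity -m llama-3-8b-bf16.gguf -f wiki.test.raw --kl-divergence-base l3-8b-bf16.kld
./perplexity -m llama-3-8b-f16.gguf  -f wiki.test.raw --kl-divergence-base l3-8b-bf16.kld --kl-divergence
```
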
 ## Old Numbers

@@ -44,9 +44,9 @@ static void write_logfile(
         return;
     }

-    const std::string timestamp = get_sortable_timestamp();
+    const std::string timestamp = string_get_sortable_timestamp();

-    const bool success = create_directory_with_parents(params.logdir);
+    const bool success = fs_create_directory_with_parents(params.logdir);
     if (!success) {
         fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
                 __func__, params.logdir.c_str());

@@ -64,7 +64,7 @@ static void write_logfile(
     fprintf(logfile, "binary: main\n");
     char model_desc[128];
     llama_model_desc(model, model_desc, sizeof(model_desc));
-    dump_non_result_info_yaml(logfile, params, ctx, timestamp, results.tokens, model_desc);
+    yaml_dump_non_result_info(logfile, params, ctx, timestamp, results.tokens, model_desc);

     fprintf(logfile, "\n");
     fprintf(logfile, "######################\n");

@@ -72,9 +72,9 @@ static void write_logfile(
     fprintf(logfile, "######################\n");
     fprintf(logfile, "\n");

-    dump_vector_float_yaml(logfile, "logits", results.logits);
+    yaml_dump_vector_float(logfile, "logits", results.logits);
     fprintf(logfile, "ppl_value: %f\n", results.ppl_value);
-    dump_vector_float_yaml(logfile, "probs", results.probs);
+    yaml_dump_vector_float(logfile, "probs", results.probs);

     llama_dump_timing_info_yaml(logfile, ctx);
     fclose(logfile);

@@ -1425,7 +1425,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
         // Use all tasks
         tasks.resize(n_task);
         printf("%s: reading tasks", __func__);
-        int n_dot = n_task/100;
+        int n_dot = std::max((int) n_task/100, 1);
         int i = 0;
         for (auto& task : tasks) {
             ++i;

@@ -1675,7 +1675,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params

     llama_batch_free(batch);

-    if (n_done < 100) return;
+    if (n_done < 100 && (params.multiple_choice_tasks != 0 && params.multiple_choice_tasks < (size_t)n_task)) return;

     float p = 1.f*n_correct/n_done;
     float sigma = sqrt(p*(1-p)/(n_done-1));

@@ -2007,7 +2007,7 @@ int main(int argc, char ** argv) {

     std::mt19937 rng(params.seed);
     if (params.random_prompt) {
-        params.prompt = gpt_random_prompt(rng);
+        params.prompt = string_random_prompt(rng);
     }

     llama_backend_init();

@@ -2035,7 +2035,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         fprintf(stderr, "\n");
-        fprintf(stderr, "%s\n", get_system_info(params).c_str());
+        fprintf(stderr, "%s\n", gpt_params_get_system_info(params).c_str());
     }

     struct results_perplexity results;
@@ -1,6 +1,8 @@
 # quantize

-TODO
+You can also use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to build your own quants without any setup.
+
+Note: It is synced from llama.cpp `main` every 6 hours.

 ## Llama 2 7B

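
For reference, basic local CLI usage of the tool this README documents (paths are illustrative; the `--keep-split` spelling matches the fix in the hunks below):

```bash
# quantize an FP16 GGUF to Q4_K_M
./quantize models/7B/ggml-model-f16.gguf models/7B/ggml-model-Q4_K_M.gguf Q4_K_M

# requantize a sharded model while keeping the split layout
./quantize --allow-requantize --keep-split ggml-model-split-00001-of-00006.gguf ggml-model-requant.gguf Q4_K
```
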
@@ -259,7 +259,7 @@ int main(int argc, char ** argv) {
                 usage(argv[0]);
             }
         } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
-            if (arg_idx == argc-1 || !parse_kv_override(argv[++arg_idx], kv_overrides)) {
+            if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
             }
         } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {

@@ -284,7 +284,7 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
-        } else if (strcmp(argv[arg_idx], "--keep-split")) {
+        } else if (strcmp(argv[arg_idx], "--keep-split") == 0) {
             params.keep_split = true;
         } else {
             usage(argv[0]);
@@ -41,8 +41,8 @@ $SPLIT --split-max-tensors 28 $WORK_PATH/gemma-1.1-2b-it.Q8_0.gguf $WORK_PATH/g
 echo PASS
 echo

-# 3. Requant model with '--keep_split'
-$QUANTIZE --allow-requantize --keep_split $WORK_PATH/ggml-model-split-00001-of-00006.gguf $WORK_PATH/ggml-model-requant.gguf Q4_K
+# 3. Requant model with '--keep-split'
+$QUANTIZE --allow-requantize --keep-split $WORK_PATH/ggml-model-split-00001-of-00006.gguf $WORK_PATH/ggml-model-requant.gguf Q4_K
 echo PASS
 echo

@@ -51,7 +51,7 @@ $MAIN --model $WORK_PATH/ggml-model-requant-00001-of-00006.gguf --random-prompt
 echo PASS
 echo

-# 4. Requant mode without '--keep_split'
+# 4. Requant mode without '--keep-split'
 $QUANTIZE --allow-requantize $WORK_PATH/ggml-model-split-00001-of-00006.gguf $WORK_PATH/ggml-model-requant-merge.gguf Q4_K
 echo PASS
 echo
@@ -11,7 +11,7 @@ struct retrieval_params {
 };

 static void retrieval_params_print_usage(int argc, char ** argv, gpt_params & gpt_params, retrieval_params & params) {
-    gpt_print_usage(argc, argv, gpt_params);
+    gpt_params_print_usage(argc, argv, gpt_params);
     printf("retrieval options:\n");
     printf("  --context-file FNAME      file containing context to embed.\n");
     printf("                            specify multiple files by providing --context-file option multiple times.\n");

@@ -226,7 +226,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         fprintf(stderr, "\n");
-        fprintf(stderr, "%s\n", get_system_info(params).c_str());
+        fprintf(stderr, "%s\n", gpt_params_get_system_info(params).c_str());
     }

     // max batch size
2 examples/rpc/CMakeLists.txt Normal file
@@ -0,0 +1,2 @@
add_executable(rpc-server rpc-server.cpp)
target_link_libraries(rpc-server PRIVATE ggml llama)
74 examples/rpc/README.md Normal file
@@ -0,0 +1,74 @@
## Overview

The `rpc-server` allows running `ggml` backend on a remote host.
The RPC backend communicates with one or several instances of `rpc-server` and offloads computations to them.
This can be used for distributed LLM inference with `llama.cpp` in the following way:

```mermaid
flowchart TD
    rpcb---|TCP|srva
    rpcb---|TCP|srvb
    rpcb-.-|TCP|srvn
    subgraph hostn[Host N]
    srvn[rpc-server]-.-backend3["Backend (CUDA,Metal,etc.)"]
    end
    subgraph hostb[Host B]
    srvb[rpc-server]---backend2["Backend (CUDA,Metal,etc.)"]
    end
    subgraph hosta[Host A]
    srva[rpc-server]---backend["Backend (CUDA,Metal,etc.)"]
    end
    subgraph host[Main Host]
    ggml[llama.cpp]---rpcb[RPC backend]
    end
    style hostn stroke:#66,stroke-width:2px,stroke-dasharray: 5 5
```

Each host can run a different backend, e.g. one with CUDA and another with Metal.
You can also run multiple `rpc-server` instances on the same host, each with a different backend.

## Usage

On each host, build the corresponding backend with `cmake` and add `-DLLAMA_RPC=ON` to the build options.
For example, to build the CUDA backend with RPC support:

```bash
mkdir build-rpc-cuda
cd build-rpc-cuda
cmake .. -DLLAMA_CUDA=ON -DLLAMA_RPC=ON
cmake --build . --config Release
```

Then, start the `rpc-server` with the backend:

```bash
$ bin/rpc-server -p 50052
create_backend: using CUDA backend
ggml_cuda_init: GGML_CUDA_FORCE_MMQ:   no
ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes
ggml_cuda_init: found 1 CUDA devices:
  Device 0: NVIDIA T1200 Laptop GPU, compute capability 7.5, VMM: yes
Starting RPC server on 0.0.0.0:50052
```

When using the CUDA backend, you can specify the device with the `CUDA_VISIBLE_DEVICES` environment variable, e.g.:
```bash
$ CUDA_VISIBLE_DEVICES=0 bin/rpc-server -p 50052
```
This way you can run multiple `rpc-server` instances on the same host, each with a different CUDA device.

On the main host build `llama.cpp` only with `-DLLAMA_RPC=ON`:

```bash
mkdir build-rpc
cd build-rpc
cmake .. -DLLAMA_RPC=ON
cmake --build . --config Release
```

Finally, use the `--rpc` option to specify the host and port of each `rpc-server`:

```bash
$ bin/main -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name is" --repeat-penalty 1.0 -n 64 --rpc 192.168.88.10:50052,192.168.88.11:50052 -ngl 99
```
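
Putting the pieces above together, a single-machine, two-GPU setup could look like the sketch below (addresses, ports and the model path are illustrative; `-H`/`-p` are the `rpc-server` options shown in `rpc-server.cpp` below):

```bash
# one rpc-server per CUDA device
CUDA_VISIBLE_DEVICES=0 bin/rpc-server -H 0.0.0.0 -p 50052 &
CUDA_VISIBLE_DEVICES=1 bin/rpc-server -H 0.0.0.0 -p 50053 &

# the main host connects to both workers
bin/main -m models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name is" -n 64 -ngl 99 \
    --rpc 127.0.0.1:50052,127.0.0.1:50053
```
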
134 examples/rpc/rpc-server.cpp Normal file
@@ -0,0 +1,134 @@
|
||||||
|
#ifdef GGML_USE_CUDA
|
||||||
|
#include "ggml-cuda.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef GGML_USE_METAL
|
||||||
|
#include "ggml-metal.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "ggml-rpc.h"
|
||||||
|
#ifdef _WIN32
|
||||||
|
# include <windows.h>
|
||||||
|
#else
|
||||||
|
# include <unistd.h>
|
||||||
|
#endif
|
||||||
|
#include <string>
|
||||||
|
#include <stdio.h>
|
||||||
|
|
||||||
|
struct rpc_server_params {
|
||||||
|
std::string host = "0.0.0.0";
|
||||||
|
int port = 50052;
|
||||||
|
size_t backend_mem = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) {
|
||||||
|
fprintf(stderr, "Usage: %s [options]\n\n", argv[0]);
|
||||||
|
fprintf(stderr, "options:\n");
|
||||||
|
fprintf(stderr, " -h, --help show this help message and exit\n");
|
||||||
|
fprintf(stderr, " -H HOST, --host HOST host to bind to (default: %s)\n", params.host.c_str());
|
||||||
|
fprintf(stderr, " -p PORT, --port PORT port to bind to (default: %d)\n", params.port);
|
||||||
|
fprintf(stderr, " -m MEM, --mem MEM backend memory size (in MB)\n");
|
||||||
|
fprintf(stderr, "\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & params) {
|
||||||
|
std::string arg;
|
||||||
|
for (int i = 1; i < argc; i++) {
|
||||||
|
arg = argv[i];
|
||||||
|
if (arg == "-H" || arg == "--host") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
params.host = argv[i];
|
||||||
|
} else if (arg == "-p" || arg == "--port") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
params.port = std::stoi(argv[i]);
|
||||||
|
if (params.port <= 0 || params.port > 65535) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} else if (arg == "-m" || arg == "--mem") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
params.backend_mem = std::stoul(argv[i]) * 1024 * 1024;
|
||||||
|
} else if (arg == "-h" || arg == "--help") {
|
||||||
|
print_usage(argc, argv, params);
|
||||||
|
exit(0);
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
|
print_usage(argc, argv, params);
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static ggml_backend_t create_backend() {
|
||||||
|
ggml_backend_t backend = NULL;
|
||||||
|
#ifdef GGML_USE_CUDA
|
||||||
|
fprintf(stderr, "%s: using CUDA backend\n", __func__);
|
||||||
|
backend = ggml_backend_cuda_init(0); // init device 0
|
||||||
|
if (!backend) {
|
||||||
|
fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
|
||||||
|
}
|
||||||
|
#elif GGML_USE_METAL
|
||||||
|
fprintf(stderr, "%s: using Metal backend\n", __func__);
|
||||||
|
backend = ggml_backend_metal_init();
|
||||||
|
if (!backend) {
|
||||||
|
fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// if there aren't GPU Backends fallback to CPU backend
|
||||||
|
if (!backend) {
|
||||||
|
fprintf(stderr, "%s: using CPU backend\n", __func__);
|
||||||
|
backend = ggml_backend_cpu_init();
|
||||||
|
}
|
||||||
|
return backend;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
|
||||||
|
#ifdef GGML_USE_CUDA
|
||||||
|
ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
|
||||||
|
#else
|
||||||
|
#ifdef _WIN32
|
||||||
|
MEMORYSTATUSEX status;
|
||||||
|
status.dwLength = sizeof(status);
|
||||||
|
GlobalMemoryStatusEx(&status);
|
||||||
|
*total_mem = status.ullTotalPhys;
|
||||||
|
*free_mem = status.ullAvailPhys;
|
||||||
|
#else
|
||||||
|
long pages = sysconf(_SC_PHYS_PAGES);
|
||||||
|
long page_size = sysconf(_SC_PAGE_SIZE);
|
||||||
|
*total_mem = pages * page_size;
|
||||||
|
*free_mem = *total_mem;
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char * argv[]) {
|
||||||
|
rpc_server_params params;
|
||||||
|
if (!rpc_server_params_parse(argc, argv, params)) {
|
||||||
|
fprintf(stderr, "Invalid parameters\n");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
ggml_backend_t backend = create_backend();
|
||||||
|
if (!backend) {
|
||||||
|
fprintf(stderr, "Failed to create backend\n");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
std::string endpoint = params.host + ":" + std::to_string(params.port);
|
||||||
|
size_t free_mem, total_mem;
|
||||||
|
if (params.backend_mem > 0) {
|
||||||
|
free_mem = params.backend_mem;
|
||||||
|
total_mem = params.backend_mem;
|
||||||
|
} else {
|
||||||
|
get_backend_memory(&free_mem, &total_mem);
|
||||||
|
}
|
||||||
|
printf("Starting RPC server on %s, backend memory: %zu MB\n", endpoint.c_str(), free_mem / (1024 * 1024));
|
||||||
|
start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem);
|
||||||
|
ggml_backend_free(backend);
|
||||||
|
return 0;
|
||||||
|
}
|
|
@@ -17,8 +17,9 @@ The project is under active development, and we are [looking for feedback and co

 **Command line options:**

-- `--threads N`, `-t N`: Set the number of threads to use during generation. Not used if model layers are offloaded to GPU. The server is using batching. This parameter is used only if one token is to be processed on CPU backend.
-- `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation. Not used if model layers are offloaded to GPU.
+- `-v`, `--verbose`: Enable verbose server output. When using the `/completion` endpoint, this includes the tokenized prompt, the full request and the full response.
+- `-t N`, `--threads N`: Set the number of threads to use by CPU layers during generation. Not used by model layers that are offloaded to GPU. This option has no effect when using the maximum number of GPU layers. Default: `std::thread::hardware_concurrency()` (number of CPU cores).
+- `-tb N, --threads-batch N`: Set the number of threads to use by CPU layers during batch and prompt processing (>= 32 tokens). This option has no effect if a GPU is available. Default: `--threads`.
 - `--threads-http N`: Number of threads in the http server pool to process requests. Default: `max(std::thread::hardware_concurrency() - 1, --parallel N + 2)`
 - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
 - `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file. Default: unused
@@ -36,9 +37,7 @@ The project is under active development, and we are [looking for feedback and co
 - `--numa STRATEGY`: Attempt one of the below optimization strategies that may help on some NUMA systems
   - `--numa distribute`: Spread execution evenly over all nodes
   - `--numa isolate`: Only spawn threads on CPUs on the node that execution started on
-  - `--numa numactl`: Use the CPU map provided by numactl. If run without this previously, it is recommended to drop the system
-page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/1437
-
+  - `--numa numactl`: Use the CPU map provided by numactl. If run without this previously, it is recommended to drop the system page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/1437
 - `--numa`: Attempt optimizations that may help on some NUMA systems.
 - `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
 - `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
@@ -48,8 +47,8 @@ page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/
 - `--path`: Path from which to serve static files. Default: disabled
 - `--api-key`: Set an api key for request authorization. By default, the server responds to every request. With an api key set, the requests must have the Authorization header set with the api key as Bearer token. May be used multiple times to enable multiple valid keys.
 - `--api-key-file`: Path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access. May be used in conjunction with `--api-key`s.
-- `--embedding`: Enable embedding extraction. Default: disabled
+- `--embeddings`: Enable embedding vector output and the OAI compatible endpoint /v1/embeddings. Physical batch size (`--ubatch-size`) must be carefully defined. Default: disabled
-- `-np N`, `--parallel N`: Set the number of slots for process requests. Default: `1`
+- `-np N`, `--parallel N`: Set the number of slots for process requests. Default: `1`. Values > 1 will allow for higher throughput with multiple parallel requests but the results will **not** be deterministic due to differences in rounding error.
 - `-cb`, `--cont-batching`: Enable continuous batching (a.k.a dynamic batching). Default: disabled
 - `-spf FNAME`, `--system-prompt-file FNAME` Set a file to load a system prompt (initial prompt of all slots). This is useful for chat applications. [See more](#change-system-prompt-on-runtime)
 - `--mmproj MMPROJ_FILE`: Path to a multimodal projector file for LLaVA.
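
Tying the options above together, a server launch and an authorized embeddings request might look like this sketch (binary name, port, paths and values are illustrative assumptions; the flags are the ones documented above):

```bash
./server -m models/7B/ggml-model.gguf \
    -t 8 -tb 16 --threads-http 4 \
    -np 4 -cb \
    --embeddings --api-key-file keys.txt

# any key listed in keys.txt must be sent as a Bearer token
curl http://127.0.0.1:8080/v1/embeddings \
    -H "Authorization: Bearer $(head -n1 keys.txt)" \
    -H "Content-Type: application/json" \
    -d '{"input": "Hello, world"}'
```
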
@@ -594,7 +594,7 @@
             message = html`<${Probabilities} data=${data} />`
         } else {
             const text = isArrayMessage ?
-                data.map(msg => msg.content).join('').replace(/^\s+/, '') :
+                data.map(msg => msg.content).join('') :
                 data;
             message = isCompletionMode ?
                 text :

@@ -877,7 +877,11 @@

     // poor mans markdown replacement
     const Markdownish = (params) => {
-        const md = params.text
+        const chunks = params.text.split('```');
+
+        for (let i = 0; i < chunks.length; i++) {
+            if (i % 2 === 0) { // outside code block
+                chunks[i] = chunks[i]
             .replace(/&/g, '&amp;')
             .replace(/</g, '&lt;')
             .replace(/>/g, '&gt;')

@@ -889,7 +893,14 @@
             .replace(/```.*?\n([\s\S]*?)```/g, '<pre><code>$1</code></pre>')
             .replace(/`(.*?)`/g, '<code>$1</code>')
             .replace(/\n/gim, '<br />');
-        return html`<span dangerouslySetInnerHTML=${{ __html: md }} />`;
+            } else { // inside code block
+                chunks[i] = `<pre><code>${chunks[i]}</code></pre>`;
+            }
+        }
+
+        const restoredText = chunks.join('');
+
+        return html`<span dangerouslySetInnerHTML=${{ __html: restoredText }} />`;
     };

     const ModelGenerationInfo = (params) => {

@@ -903,6 +914,7 @@
         `
     }

+
     // simple popover impl
     const Popover = (props) => {
         const isOpen = useSignal(false);

@@ -1054,4 +1066,3 @@
 </body>

 </html>
-
49 examples/server/public_simplechat/index.html Normal file
@@ -0,0 +1,49 @@
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<title>SimpleChat LlamaCppEtal </title>
|
||||||
|
<meta charset="UTF-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||||
|
<meta name="message" content="Save Nature Save Earth" />
|
||||||
|
<meta name="description" content="SimpleChat: trigger LLM web service endpoints /chat/completions and /completions, single/multi chat sessions" />
|
||||||
|
<meta name="author" content="by Humans for All" />
|
||||||
|
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
|
||||||
|
<script src="simplechat.js" defer></script>
|
||||||
|
<link rel="stylesheet" href="simplechat.css" />
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="samecolumn" id="fullbody">
|
||||||
|
|
||||||
|
<div class="sameline">
|
||||||
|
<p class="heading flex-grow" > <b> SimpleChat </b> </p>
|
||||||
|
<div class="sameline">
|
||||||
|
<label for="api-ep">Mode:</label>
|
||||||
|
<select name="api-ep" id="api-ep">
|
||||||
|
<option value="chat" selected>Chat</option>
|
||||||
|
<option value="completion">Completion</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="sessions-div" class="sameline"></div>
|
||||||
|
|
||||||
|
<hr>
|
||||||
|
<div class="sameline">
|
||||||
|
<label for="system-in">System</label>
|
||||||
|
<input type="text" name="system" id="system-in" placeholder="e.g. you are a helpful ai assistant, who provides concise answers" class="flex-grow"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<hr>
|
||||||
|
<div id="chat-div">
|
||||||
|
<p> You need to have javascript enabled.</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<hr>
|
||||||
|
<div class="sameline">
|
||||||
|
<textarea id="user-in" class="flex-grow" rows="3" placeholder="enter your query to the ai model here" ></textarea>
|
||||||
|
<button id="user-btn">submit</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</body>
|
||||||
|
</html>
|
201 examples/server/public_simplechat/readme.md Normal file
@@ -0,0 +1,201 @@
|
||||||
|
|
||||||
|
# SimpleChat
|
||||||
|
|
||||||
|
by Humans for All.
|
||||||
|
|
||||||
|
|
||||||
|
## overview
|
||||||
|
|
||||||
|
This simple web frontend, allows triggering/testing the server's /completions or /chat/completions endpoints
|
||||||
|
in a simple way with minimal code from a common code base. Inturn additionally it tries to allow single or
|
||||||
|
multiple independent back and forth chatting to an extent, with the ai llm model at a basic level, with their
|
||||||
|
own system prompts.
|
||||||
|
|
||||||
|
The UI follows a responsive web design so that the layout can adapt to available display space in a usable
|
||||||
|
enough manner, in general.
|
||||||
|
|
||||||
|
Allows developer/end-user to control some of the behaviour by updating gMe members from browser's devel-tool
|
||||||
|
console.
|
||||||
|
|
||||||
|
NOTE: Given that the idea is for basic minimal testing, it doesnt bother with any model context length and
|
||||||
|
culling of old messages from the chat by default. However by enabling the sliding window chat logic, a crude
|
||||||
|
form of old messages culling can be achieved.
|
||||||
|
|
||||||
|
NOTE: It doesnt set any parameters other than temperature and max_tokens for now. However if someone wants
|
||||||
|
they can update the js file or equivalent member in gMe as needed.
|
||||||
|
|
||||||
|
|
||||||
|
## usage
|
||||||
|
|
||||||
|
One could run this web frontend directly using server itself or if anyone is thinking of adding a built in web
|
||||||
|
frontend to configure the server over http(s) or so, then run this web frontend using something like python's
|
||||||
|
http module.
|
||||||
|
|
||||||
|
### running using examples/server
|
||||||
|
|
||||||
|
bin/server -m path/model.gguf --path ../examples/server/public_simplechat [--port PORT]
|
||||||
|
|
||||||
|
### running using python3's server module
|
||||||
|
|
||||||
|
first run examples/server
|
||||||
|
* bin/server -m path/model.gguf
|
||||||
|
|
||||||
|
next run this web front end in examples/server/public_simplechat
|
||||||
|
* cd ../examples/server/public_simplechat
|
||||||
|
* python3 -m http.server PORT
|
||||||
|
|
||||||
|
### using the front end
|
||||||
|
|
||||||
|
Open this simple web front end from your local browser
|
||||||
|
|
||||||
|
* http://127.0.0.1:PORT/index.html
|
||||||
|
|
||||||
|
Once inside
|
||||||
|
|
||||||
|
* Select between chat and completion mode. By default it is set to chat mode.
|
||||||
|
|
||||||
|
* In completion mode
|
||||||
|
* logic by default doesnt insert any role specific "ROLE: " prefix wrt each role's message.
|
||||||
|
If the model requires any prefix wrt user role messages, then the end user has to
|
||||||
|
explicitly add the needed prefix, when they enter their chat message.
|
||||||
|
Similarly if the model requires any prefix to trigger assistant/ai-model response,
|
||||||
|
then the end user needs to enter the same.
|
||||||
|
This keeps the logic simple, while still giving flexibility to the end user to
|
||||||
|
manage any templating/tagging requirement wrt their messages to the model.
|
||||||
|
* the logic doesnt insert newline at the begining and end wrt the prompt message generated.
|
||||||
|
However if the chat being sent to /completions end point has more than one role's message,
|
||||||
|
then insert newline when moving from one role's message to the next role's message, so
|
||||||
|
that it can be clearly identified/distinguished.
|
||||||
|
* given that /completions endpoint normally doesnt add additional chat-templating of its
|
||||||
|
own, the above ensures that end user can create a custom single/multi message combo with
|
||||||
|
any tags/special-tokens related chat templating to test out model handshake. Or enduser
|
||||||
|
can use it just for normal completion related/based query.
|
||||||
|
|
||||||
|
* If you want to provide a system prompt, then ideally enter it first, before entering any user query.
|
||||||
|
Normally Completion mode doesnt need system prompt, while Chat mode can generate better/interesting
|
||||||
|
responses with a suitable system prompt.
|
||||||
|
* if chat.add_system_begin is used
|
||||||
|
* you cant change the system prompt, after it is has been submitted once along with user query.
|
||||||
|
* you cant set a system prompt, after you have submitted any user query
|
||||||
|
* if chat.add_system_anytime is used
|
||||||
|
* one can change the system prompt any time during chat, by changing the contents of system prompt.
|
||||||
|
* inturn the updated/changed system prompt will be inserted into the chat session.
|
||||||
|
* this allows for the subsequent user chatting to be driven by the new system prompt set above.
|
||||||
|
|
||||||
|
* Enter your query and either press enter or click on the submit button.
|
||||||
|
If you want to insert enter (\n) as part of your chat/query to ai model, use shift+enter.
|
||||||
|
|
||||||
|
* Wait for the logic to communicate with the server and get the response.
|
||||||
|
* the user is not allowed to enter any fresh query during this time.
|
||||||
|
* the user input box will be disabled and a working message will be shown in it.
|
||||||
|
|
||||||
|
* just refresh the page, to reset wrt the chat history and or system prompt and start afresh.
|
||||||
|
|
||||||
|
* Using NewChat one can start independent chat sessions.
|
||||||
|
* two independent chat sessions are setup by default.
|
||||||
|
|
||||||
|
|
||||||
|
## Devel note
|
||||||
|
|
||||||
|
### Reason behind this
|
||||||
|
|
||||||
|
The idea is to be easy enough to use for basic purposes, while also being simple and easily discernable
|
||||||
|
by developers who may not be from web frontend background (so inturn may not be familiar with template /
|
||||||
|
end-use-specific-language-extensions driven flows) so that they can use it to explore/experiment things.
|
||||||
|
|
||||||
|
And given that the idea is also to help explore/experiment for developers, some flexibility is provided
|
||||||
|
to change behaviour easily using the devel-tools/console, for now. And skeletal logic has been implemented
|
||||||
|
to explore some of the end points and ideas/implications around them.
|
||||||
|
|
||||||
|
|
||||||
|
### General
|
||||||
|
|
||||||
|
Me/gMe consolidates the settings which control the behaviour into one object.
|
||||||
|
One can see the current settings, as well as change/update them using browsers devel-tool/console.
|
||||||
|
|
||||||
|
bCompletionFreshChatAlways - whether Completion mode collates complete/sliding-window history when
|
||||||
|
communicating with the server or only sends the latest user query/message.
|
||||||
|
|
||||||
|
bCompletionInsertStandardRolePrefix - whether Completion mode inserts role related prefix wrt the
|
||||||
|
messages that get inserted into prompt field wrt /Completion endpoint.
|
||||||
|
|
||||||
|
chatRequestOptions - maintains the list of options/fields to send along with chat request,
|
||||||
|
irrespective of whether /chat/completions or /completions endpoint.
|
||||||
|
|
||||||
|
If you want to add additional options/fields to send to the server/ai-model, and or
|
||||||
|
modify the existing options value or remove them, for now you can update this global var
|
||||||
|
using browser's development-tools/console.
|
||||||
|
|
||||||
|
iRecentUserMsgCnt - a simple minded SlidingWindow to limit context window load at Ai Model end.
|
||||||
|
This is disabled by default. However if enabled, then in addition to latest system message, only
|
||||||
|
the last/latest iRecentUserMsgCnt user messages after the latest system prompt and its responses
|
||||||
|
from the ai model will be sent to the ai-model, when querying for a new response. IE if enabled,
|
||||||
|
only user messages after the latest system message/prompt will be considered.
|
||||||
|
|
||||||
|
This specified sliding window user message count also includes the latest user query.
|
||||||
|
<0 : Send entire chat history to server
|
||||||
|
0 : Send only the system message if any to the server
|
||||||
|
>0 : Send the latest chat history from the latest system prompt, limited to specified cnt.
|
||||||
|
|
||||||
|
|
||||||
|
By using gMe's iRecentUserMsgCnt and chatRequestOptions.max_tokens one can try to control the
|
||||||
|
implications of loading of the ai-model's context window by chat history, wrt chat response to
|
||||||
|
some extent in a simple crude way.
|
||||||
|
|
||||||
|
|
||||||
|
Sometimes the browser may be stuborn with caching of the file, so your updates to html/css/js
|
||||||
|
may not be visible. Also remember that just refreshing/reloading page in browser or for that
|
||||||
|
matter clearing site data, dont directly override site caching in all cases. Worst case you may
|
||||||
|
have to change port. Or in dev tools of browser, you may be able to disable caching fully.
|
||||||
|
|
||||||
|
|
||||||
|
Concept of multiple chat sessions with different servers, as well as saving and restoring of
|
||||||
|
those across browser usage sessions, can be woven around the SimpleChat/MultiChatUI class and
|
||||||
|
its instances relatively easily, however given the current goal of keeping this simple, it has
|
||||||
|
not been added, for now.
|
||||||
|
|
||||||
|
|
||||||
|
By switching between chat.add_system_begin/anytime, one can control whether one can change
|
||||||
|
the system prompt, anytime during the conversation or only at the beginning.
|
||||||
|
|
||||||
|
|
||||||
|
read_json_early, is to experiment with reading json response data early on, if available,
|
||||||
|
so that user can be shown generated data, as and when it is being generated, rather than
|
||||||
|
at the end when full data is available.
|
||||||
|
|
||||||
|
the server flow doesnt seem to be sending back data early, atleast for request (inc options)
|
||||||
|
that is currently sent.
|
||||||
|
|
||||||
|
if able to read json data early on in future, as and when ai model is generating data, then
|
||||||
|
this helper needs to indirectly update the chat div with the recieved data, without waiting
|
||||||
|
for the overall data to be available.
|
||||||
|
|
||||||
|
|
||||||
|
### Default setup
|
||||||
|
|
||||||
|
By default things are setup to try and make the user experience a bit better, if possible.
|
||||||
|
However a developer when testing the server of ai-model may want to change these value.
|
||||||
|
|
||||||
|
Using iRecentUserMsgCnt reduce chat history context sent to the server/ai-model to be
|
||||||
|
just the system-prompt, prev-user-request-and-ai-response and cur-user-request, instead of
|
||||||
|
full chat history. This way if there is any response with garbage/repeatation, it doesnt
|
||||||
|
mess with things beyond the next question/request/query, in some ways.
|
||||||
|
|
||||||
|
Set max_tokens to 1024, so that a relatively large previous reponse doesnt eat up the space
|
||||||
|
available wrt next query-response. However dont forget that the server when started should
|
||||||
|
also be started with a model context size of 1k or more, to be on safe side.
|
||||||
|
|
||||||
|
The /completions endpoint of examples/server doesnt take max_tokens, instead it takes the
|
||||||
|
internal n_predict, for now add the same here on the client side, maybe later add max_tokens
|
||||||
|
to /completions endpoint handling code on server side.
|
||||||
|
|
||||||
|
Frequency and presence penalty fields are set to 1.2 in the set of fields sent to server
|
||||||
|
along with the user query. So that the model is partly set to try avoid repeating text in
|
||||||
|
its response.
|
||||||
|
|
||||||
|
A end-user can change these behaviour by editing gMe from browser's devel-tool/console.
|
||||||
|
|
||||||
|
|
||||||
|
## At the end
|
||||||
|
|
||||||
|
Also a thank you to all open source and open model developers, who strive for the common good.
|
68
examples/server/public_simplechat/simplechat.css
Normal file
68
examples/server/public_simplechat/simplechat.css
Normal file
|
@ -0,0 +1,68 @@
|
||||||
|
/**
|
||||||
|
* the styling of the simplechat web frontend
|
||||||
|
* by Humans for All
|
||||||
|
*/
|
||||||
|
|
||||||
|
#fullbody {
|
||||||
|
height: 98vh;
|
||||||
|
}
|
||||||
|
|
||||||
|
.heading {
|
||||||
|
background-color: lightgray;
|
||||||
|
}
|
||||||
|
|
||||||
|
.session-selected {
|
||||||
|
background-color: lightblue;
|
||||||
|
}
|
||||||
|
|
||||||
|
.role-system {
|
||||||
|
background-color: lightblue;
|
||||||
|
}
|
||||||
|
.role-user {
|
||||||
|
background-color: lightgray;
|
||||||
|
}
|
||||||
|
|
||||||
|
.flex-grow {
|
||||||
|
flex-grow: 1;
|
||||||
|
}
|
||||||
|
.float-right {
|
||||||
|
float: right;
|
||||||
|
}
|
||||||
|
|
||||||
|
#chat-div {
|
||||||
|
overflow: scroll;
|
||||||
|
flex-grow: 1;
|
||||||
|
flex-shrink: 1;
|
||||||
|
min-height: 40vh;
|
||||||
|
}
|
||||||
|
button {
|
||||||
|
min-width: 8vw;
|
||||||
|
}
|
||||||
|
|
||||||
|
.sameline {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: row;
|
||||||
|
}
|
||||||
|
.samecolumn {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
}
|
||||||
|
|
||||||
|
.ul1 {
|
||||||
|
padding-inline-start: 2vw;
|
||||||
|
}
|
||||||
|
.ul2 {
|
||||||
|
padding-inline-start: 2vw;
|
||||||
|
}
|
||||||
|
|
||||||
|
* {
|
||||||
|
margin: 0.6vmin;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media print {
|
||||||
|
|
||||||
|
#fullbody {
|
||||||
|
height: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
629
examples/server/public_simplechat/simplechat.js
Normal file
629
examples/server/public_simplechat/simplechat.js
Normal file
|
@ -0,0 +1,629 @@
|
||||||
|
// @ts-check
|
||||||
|
// A simple completions and chat/completions test related web front end logic
|
||||||
|
// by Humans for All
|
||||||
|
|
||||||
|
class Roles {
|
||||||
|
static System = "system";
|
||||||
|
static User = "user";
|
||||||
|
static Assistant = "assistant";
|
||||||
|
}
|
||||||
|
|
||||||
|
class ApiEP {
|
||||||
|
static Chat = "chat";
|
||||||
|
static Completion = "completion";
|
||||||
|
}
|
||||||
|
|
||||||
|
let gUsageMsg = `
|
||||||
|
<p class="role-system">Usage</p>
|
||||||
|
<ul class="ul1">
|
||||||
|
<li> Set system prompt above, to try control ai response charactersitic, if model supports same.</li>
|
||||||
|
<ul class="ul2">
|
||||||
|
<li> Completion mode normally wont have a system prompt.</li>
|
||||||
|
</ul>
|
||||||
|
<li> Enter your query to ai assistant below.</li>
|
||||||
|
<ul class="ul2">
|
||||||
|
<li> Completion mode doesnt insert user/role: prefix implicitly.</li>
|
||||||
|
<li> Use shift+enter for inserting enter/newline.</li>
|
||||||
|
</ul>
|
||||||
|
<li> Default ContextWindow = [System, Last Query+Resp, Cur Query].</li>
|
||||||
|
<ul class="ul2">
|
||||||
|
<li> experiment iRecentUserMsgCnt, max_tokens, model ctxt window to expand</li>
|
||||||
|
</ul>
|
||||||
|
</ul>
|
||||||
|
`;
|
||||||
|
|
||||||
|
/** @typedef {{role: string, content: string}[]} ChatMessages */
|
||||||
|
|
||||||
|
class SimpleChat {
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
/**
|
||||||
|
* Maintain in a form suitable for common LLM web service chat/completions' messages entry
|
||||||
|
* @type {ChatMessages}
|
||||||
|
*/
|
||||||
|
this.xchat = [];
|
||||||
|
this.iLastSys = -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
clear() {
|
||||||
|
this.xchat = [];
|
||||||
|
this.iLastSys = -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Recent chat messages.
|
||||||
|
* If iRecentUserMsgCnt < 0
|
||||||
|
* Then return the full chat history
|
||||||
|
* Else
|
||||||
|
* Return chat messages from latest going back till the last/latest system prompt.
|
||||||
|
* While keeping track that the number of user queries/messages doesnt exceed iRecentUserMsgCnt.
|
||||||
|
* @param {number} iRecentUserMsgCnt
|
||||||
|
*/
|
||||||
|
recent_chat(iRecentUserMsgCnt) {
|
||||||
|
if (iRecentUserMsgCnt < 0) {
|
||||||
|
return this.xchat;
|
||||||
|
}
|
||||||
|
if (iRecentUserMsgCnt == 0) {
|
||||||
|
console.warn("WARN:SimpleChat:SC:RecentChat:iRecentUsermsgCnt of 0 means no user message/query sent");
|
||||||
|
}
|
||||||
|
/** @type{ChatMessages} */
|
||||||
|
let rchat = [];
|
||||||
|
let sysMsg = this.get_system_latest();
|
||||||
|
if (sysMsg.length != 0) {
|
||||||
|
rchat.push({role: Roles.System, content: sysMsg});
|
||||||
|
}
|
||||||
|
let iUserCnt = 0;
|
||||||
|
let iStart = this.xchat.length;
|
||||||
|
for(let i=this.xchat.length-1; i > this.iLastSys; i--) {
|
||||||
|
if (iUserCnt >= iRecentUserMsgCnt) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let msg = this.xchat[i];
|
||||||
|
if (msg.role == Roles.User) {
|
||||||
|
iStart = i;
|
||||||
|
iUserCnt += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for(let i = iStart; i < this.xchat.length; i++) {
|
||||||
|
let msg = this.xchat[i];
|
||||||
|
if (msg.role == Roles.System) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
rchat.push({role: msg.role, content: msg.content});
|
||||||
|
}
|
||||||
|
return rchat;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add an entry into xchat
|
||||||
|
* @param {string} role
|
||||||
|
* @param {string|undefined|null} content
|
||||||
|
*/
|
||||||
|
add(role, content) {
|
||||||
|
if ((content == undefined) || (content == null) || (content == "")) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
this.xchat.push( {role: role, content: content} );
|
||||||
|
if (role == Roles.System) {
|
||||||
|
this.iLastSys = this.xchat.length - 1;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Show the contents in the specified div
|
||||||
|
* @param {HTMLDivElement} div
|
||||||
|
* @param {boolean} bClear
|
||||||
|
*/
|
||||||
|
show(div, bClear=true) {
|
||||||
|
if (bClear) {
|
||||||
|
div.replaceChildren();
|
||||||
|
}
|
||||||
|
let last = undefined;
|
||||||
|
for(const x of this.recent_chat(gMe.iRecentUserMsgCnt)) {
|
||||||
|
let entry = document.createElement("p");
|
||||||
|
entry.className = `role-${x.role}`;
|
||||||
|
entry.innerText = `${x.role}: ${x.content}`;
|
||||||
|
div.appendChild(entry);
|
||||||
|
last = entry;
|
||||||
|
}
|
||||||
|
if (last !== undefined) {
|
||||||
|
last.scrollIntoView(false);
|
||||||
|
} else {
|
||||||
|
if (bClear) {
|
||||||
|
div.innerHTML = gUsageMsg;
|
||||||
|
gMe.show_info(div);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
|
||||||
|
* The needed fields/options are picked from a global object.
|
||||||
|
* Convert the json into string.
|
||||||
|
* @param {Object} obj
|
||||||
|
*/
|
||||||
|
request_jsonstr(obj) {
|
||||||
|
for(let k in gMe.chatRequestOptions) {
|
||||||
|
obj[k] = gMe.chatRequestOptions[k];
|
||||||
|
}
|
||||||
|
return JSON.stringify(obj);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return a string form of json object suitable for chat/completions
|
||||||
|
*/
|
||||||
|
request_messages_jsonstr() {
|
||||||
|
let req = {
|
||||||
|
messages: this.recent_chat(gMe.iRecentUserMsgCnt),
|
||||||
|
}
|
||||||
|
return this.request_jsonstr(req);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return a string form of json object suitable for /completions
|
||||||
|
* @param {boolean} bInsertStandardRolePrefix Insert "<THE_ROLE>: " as prefix wrt each role's message
|
||||||
|
*/
|
||||||
|
request_prompt_jsonstr(bInsertStandardRolePrefix) {
|
||||||
|
let prompt = "";
|
||||||
|
let iCnt = 0;
|
||||||
|
for(const chat of this.recent_chat(gMe.iRecentUserMsgCnt)) {
|
||||||
|
iCnt += 1;
|
||||||
|
if (iCnt > 1) {
|
||||||
|
prompt += "\n";
|
||||||
|
}
|
||||||
|
if (bInsertStandardRolePrefix) {
|
||||||
|
prompt += `${chat.role}: `;
|
||||||
|
}
|
||||||
|
prompt += `${chat.content}`;
|
||||||
|
}
|
||||||
|
let req = {
|
||||||
|
prompt: prompt,
|
||||||
|
}
|
||||||
|
return this.request_jsonstr(req);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allow setting of system prompt, but only at begining.
|
||||||
|
* @param {string} sysPrompt
|
||||||
|
* @param {string} msgTag
|
||||||
|
*/
|
||||||
|
add_system_begin(sysPrompt, msgTag) {
|
||||||
|
if (this.xchat.length == 0) {
|
||||||
|
if (sysPrompt.length > 0) {
|
||||||
|
return this.add(Roles.System, sysPrompt);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (sysPrompt.length > 0) {
|
||||||
|
if (this.xchat[0].role !== Roles.System) {
|
||||||
|
console.error(`ERRR:SimpleChat:SC:${msgTag}:You need to specify system prompt before any user query, ignoring...`);
|
||||||
|
} else {
|
||||||
|
if (this.xchat[0].content !== sysPrompt) {
|
||||||
|
console.error(`ERRR:SimpleChat:SC:${msgTag}:You cant change system prompt, mid way through, ignoring...`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allow setting of system prompt, at any time.
|
||||||
|
* @param {string} sysPrompt
|
||||||
|
* @param {string} msgTag
|
||||||
|
*/
|
||||||
|
add_system_anytime(sysPrompt, msgTag) {
|
||||||
|
if (sysPrompt.length <= 0) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.iLastSys < 0) {
|
||||||
|
return this.add(Roles.System, sysPrompt);
|
||||||
|
}
|
||||||
|
|
||||||
|
let lastSys = this.xchat[this.iLastSys].content;
|
||||||
|
if (lastSys !== sysPrompt) {
|
||||||
|
return this.add(Roles.System, sysPrompt);
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Retrieve the latest system prompt.
|
||||||
|
*/
|
||||||
|
get_system_latest() {
|
||||||
|
if (this.iLastSys == -1) {
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
let sysPrompt = this.xchat[this.iLastSys].content;
|
||||||
|
return sysPrompt;
|
||||||
|
}
|
||||||
|
|
||||||
|
}


let gBaseURL = "http://127.0.0.1:8080";
let gChatURL = {
    'chat': `${gBaseURL}/chat/completions`,
    'completion': `${gBaseURL}/completions`,
}


/**
 * Set the class of the children, based on whether it is the idSelected or not.
 * @param {HTMLDivElement} elBase
 * @param {string} idSelected
 * @param {string} classSelected
 * @param {string} classUnSelected
 */
function el_children_config_class(elBase, idSelected, classSelected, classUnSelected="") {
    for(let child of elBase.children) {
        if (child.id == idSelected) {
            child.className = classSelected;
        } else {
            child.className = classUnSelected;
        }
    }
}

/**
 * Create button and set it up.
 * @param {string} id
 * @param {(this: HTMLButtonElement, ev: MouseEvent) => any} callback
 * @param {string | undefined} name
 * @param {string | undefined} innerText
 */
function el_create_button(id, callback, name=undefined, innerText=undefined) {
    if (!name) {
        name = id;
    }
    if (!innerText) {
        innerText = id;
    }
    let btn = document.createElement("button");
    btn.id = id;
    btn.name = name;
    btn.innerText = innerText;
    btn.addEventListener("click", callback);
    return btn;
}


class MultiChatUI {

    constructor() {
        /** @type {Object<string, SimpleChat>} */
        this.simpleChats = {};
        /** @type {string} */
        this.curChatId = "";

        // the ui elements
        this.elInSystem = /** @type{HTMLInputElement} */(document.getElementById("system-in"));
        this.elDivChat = /** @type{HTMLDivElement} */(document.getElementById("chat-div"));
        this.elBtnUser = /** @type{HTMLButtonElement} */(document.getElementById("user-btn"));
        this.elInUser = /** @type{HTMLInputElement} */(document.getElementById("user-in"));
        this.elSelectApiEP = /** @type{HTMLSelectElement} */(document.getElementById("api-ep"));
        this.elDivSessions = /** @type{HTMLDivElement} */(document.getElementById("sessions-div"));

        this.validate_element(this.elInSystem, "system-in");
        this.validate_element(this.elDivChat, "chat-div");
        this.validate_element(this.elInUser, "user-in");
        this.validate_element(this.elSelectApiEP, "api-ep");
        this.validate_element(this.elDivSessions, "sessions-div");
    }

    /**
     * Check that the expected element was found in the html; log it, else throw.
     * @param {HTMLElement | null} el
     * @param {string} msgTag
     */
    validate_element(el, msgTag) {
        if (el == null) {
            throw Error(`ERRR:SimpleChat:MCUI:${msgTag} element missing in html...`);
        } else {
            console.debug(`INFO:SimpleChat:MCUI:${msgTag} Id[${el.id}] Name[${el["name"]}]`);
        }
    }

    /**
     * Reset user input ui.
     * * clear user input
     * * enable user input
     * * set focus to user input
     */
    ui_reset_userinput() {
        this.elInUser.value = "";
        this.elInUser.disabled = false;
        this.elInUser.focus();
    }

    /**
     * Set up the needed UI callbacks, set curChatId to defaultChatId and
     * optionally switch to the specified defaultChatId.
     * @param {string} defaultChatId
     * @param {boolean} bSwitchSession
     */
    setup_ui(defaultChatId, bSwitchSession=false) {

        this.curChatId = defaultChatId;
        if (bSwitchSession) {
            this.handle_session_switch(this.curChatId);
        }

        this.elBtnUser.addEventListener("click", (ev)=>{
            if (this.elInUser.disabled) {
                return;
            }
            this.handle_user_submit(this.curChatId, this.elSelectApiEP.value).catch((/** @type{Error} */reason)=>{
                let msg = `ERRR:SimpleChat\nMCUI:HandleUserSubmit:${this.curChatId}\n${reason.name}:${reason.message}`;
                console.debug(msg.replace("\n", ":"));
                alert(msg);
                this.ui_reset_userinput();
            });
        });

        this.elInUser.addEventListener("keyup", (ev)=> {
            // allow user to insert enter into their message using shift+enter.
            // while just pressing enter key will lead to submitting.
            if ((ev.key === "Enter") && (!ev.shiftKey)) {
                let value = this.elInUser.value;
                this.elInUser.value = value.substring(0, value.length-1);
                this.elBtnUser.click();
                ev.preventDefault();
            }
        });

        this.elInSystem.addEventListener("keyup", (ev)=> {
            // allow user to insert enter into the system prompt using shift+enter.
            // while just pressing enter key will lead to setting the system prompt.
            if ((ev.key === "Enter") && (!ev.shiftKey)) {
                let chat = this.simpleChats[this.curChatId];
                chat.add_system_anytime(this.elInSystem.value, this.curChatId);
                chat.show(this.elDivChat);
                ev.preventDefault();
            }
        });

    }

    /**
     * Set up a new chat session and optionally switch to it.
     * @param {string} chatId
     * @param {boolean} bSwitchSession
     */
    new_chat_session(chatId, bSwitchSession=false) {
        this.simpleChats[chatId] = new SimpleChat();
        if (bSwitchSession) {
            this.handle_session_switch(chatId);
        }
    }

    /**
     * Try to read the json response early, as it arrives, if possible.
     * @param {Response} resp
     */
    async read_json_early(resp) {
        if (!resp.body) {
            throw Error("ERRR:SimpleChat:MCUI:ReadJsonEarly:No body...");
        }
        let tdUtf8 = new TextDecoder("utf-8");
        let rr = resp.body.getReader();
        let gotBody = "";
        while(true) {
            let { value: cur, done: done } = await rr.read();
            let curBody = tdUtf8.decode(cur);
            console.debug("DBUG:SC:PART:", curBody);
            gotBody += curBody;
            if (done) {
                break;
            }
        }
        return JSON.parse(gotBody);
    }

    /**
     * Handle user query submit request, wrt specified chat session.
     * @param {string} chatId
     * @param {string} apiEP
     */
    async handle_user_submit(chatId, apiEP) {

        let chat = this.simpleChats[chatId];

        // In completion mode, if configured, clear any previous chat history.
        // So if user wants to simulate a multi-chat based completion query,
        // they will have to enter the full thing, as a suitable multiline
        // user input/query.
        if ((apiEP == ApiEP.Completion) && (gMe.bCompletionFreshChatAlways)) {
            chat.clear();
        }

        chat.add_system_anytime(this.elInSystem.value, chatId);

        let content = this.elInUser.value;
        if (!chat.add(Roles.User, content)) {
            console.debug(`WARN:SimpleChat:MCUI:${chatId}:HandleUserSubmit:Ignoring empty user input...`);
            return;
        }
        chat.show(this.elDivChat);

        let theBody;
        let theUrl = gChatURL[apiEP];
        if (apiEP == ApiEP.Chat) {
            theBody = chat.request_messages_jsonstr();
        } else {
            theBody = chat.request_prompt_jsonstr(gMe.bCompletionInsertStandardRolePrefix);
        }

        this.elInUser.value = "working...";
        this.elInUser.disabled = true;
        console.debug(`DBUG:SimpleChat:MCUI:${chatId}:HandleUserSubmit:${theUrl}:ReqBody:${theBody}`);
        let resp = await fetch(theUrl, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
            },
            body: theBody,
        });

        let respBody = await resp.json();
        //let respBody = await this.read_json_early(resp);
        console.debug(`DBUG:SimpleChat:MCUI:${chatId}:HandleUserSubmit:RespBody:${JSON.stringify(respBody)}`);
        let assistantMsg;
        if (apiEP == ApiEP.Chat) {
            assistantMsg = respBody["choices"][0]["message"]["content"];
        } else {
            try {
                assistantMsg = respBody["choices"][0]["text"];
            } catch {
                assistantMsg = respBody["content"];
            }
        }
        chat.add(Roles.Assistant, assistantMsg);
        if (chatId == this.curChatId) {
            chat.show(this.elDivChat);
        } else {
            console.debug(`DBUG:SimpleChat:MCUI:HandleUserSubmit:ChatId has changed:[${chatId}] [${this.curChatId}]`);
        }
        this.ui_reset_userinput();
    }
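
    // For illustration only: the two endpoints answer with different shapes, which is why
    // the extraction above branches. Roughly (assuming non-streamed responses from the
    // llama.cpp example server; field sets beyond those read here may vary):
    //
    //   /chat/completions -> {"choices":[{"message":{"role":"assistant","content":"..."}}], ...}
    //   /completions      -> {"content":"..."} or an OpenAI-style {"choices":[{"text":"..."}], ...}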

    /**
     * Show buttons for NewChat and available chat sessions, in the passed elDiv.
     * If elDiv is undefined/null, then use this.elDivSessions.
     * Take care of highlighting the selected chat-session's btn.
     * @param {HTMLDivElement | undefined} elDiv
     */
    show_sessions(elDiv=undefined) {
        if (!elDiv) {
            elDiv = this.elDivSessions;
        }
        elDiv.replaceChildren();
        // Btn for creating new chat session
        let btnNew = el_create_button("New CHAT", (ev)=> {
            if (this.elInUser.disabled) {
                console.error(`ERRR:SimpleChat:MCUI:NewChat:Current session [${this.curChatId}] awaiting response, ignoring request...`);
                alert("ERRR:SimpleChat\nMCUI:NewChat\nWait for response to pending query, before starting new chat session");
                return;
            }
            let chatId = `Chat${Object.keys(this.simpleChats).length}`;
            let chatIdGot = prompt("INFO:SimpleChat\nMCUI:NewChat\nEnter id for new chat session", chatId);
            if (!chatIdGot) {
                console.error("ERRR:SimpleChat:MCUI:NewChat:Skipping based on user request...");
                return;
            }
            this.new_chat_session(chatIdGot, true);
            this.create_session_btn(elDiv, chatIdGot);
            el_children_config_class(elDiv, chatIdGot, "session-selected", "");
        });
        elDiv.appendChild(btnNew);
        // Btns for existing chat sessions
        let chatIds = Object.keys(this.simpleChats);
        for(let cid of chatIds) {
            let btn = this.create_session_btn(elDiv, cid);
            if (cid == this.curChatId) {
                btn.className = "session-selected";
            }
        }
    }

    create_session_btn(elDiv, cid) {
        let btn = el_create_button(cid, (ev)=>{
            let target = /** @type{HTMLButtonElement} */(ev.target);
            console.debug(`DBUG:SimpleChat:MCUI:SessionClick:${target.id}`);
            if (this.elInUser.disabled) {
                console.error(`ERRR:SimpleChat:MCUI:SessionClick:${target.id}:Current session [${this.curChatId}] awaiting response, ignoring switch...`);
                alert("ERRR:SimpleChat\nMCUI:SessionClick\nWait for response to pending query, before switching");
                return;
            }
            this.handle_session_switch(target.id);
            el_children_config_class(elDiv, target.id, "session-selected", "");
        });
        elDiv.appendChild(btn);
        return btn;
    }

    /**
     * Switch ui to the specified chatId and set curChatId to same.
     * @param {string} chatId
     */
    async handle_session_switch(chatId) {
        let chat = this.simpleChats[chatId];
        if (chat == undefined) {
            console.error(`ERRR:SimpleChat:MCUI:HandleSessionSwitch:${chatId} missing...`);
            return;
        }
        this.elInSystem.value = chat.get_system_latest();
        this.elInUser.value = "";
        chat.show(this.elDivChat);
        this.elInUser.focus();
        this.curChatId = chatId;
        console.log(`INFO:SimpleChat:MCUI:HandleSessionSwitch:${chatId} entered...`);
    }

}


class Me {

    constructor() {
        this.defaultChatIds = [ "Default", "Other" ];
        this.multiChat = new MultiChatUI();
        this.bCompletionFreshChatAlways = true;
        this.bCompletionInsertStandardRolePrefix = false;
        this.iRecentUserMsgCnt = 2;
        // Add needed fields wrt the json object that will be sent to the LLM web service completions endpoint.
        this.chatRequestOptions = {
            "temperature": 0.7,
            "max_tokens": 1024,
            "frequency_penalty": 1.2,
            "presence_penalty": 1.2,
            "n_predict": 1024
        };
    }

    /**
     * @param {HTMLDivElement} elDiv
     */
    show_info(elDiv) {

        var p = document.createElement("p");
        p.innerText = "Settings (devel-tools-console gMe)";
        p.className = "role-system";
        elDiv.appendChild(p);

        p = document.createElement("p");
        p.innerText = `bCompletionFreshChatAlways:${this.bCompletionFreshChatAlways}`;
        elDiv.appendChild(p);

        p = document.createElement("p");
        p.innerText = `bCompletionInsertStandardRolePrefix:${this.bCompletionInsertStandardRolePrefix}`;
        elDiv.appendChild(p);

        p = document.createElement("p");
        p.innerText = `iRecentUserMsgCnt:${this.iRecentUserMsgCnt}`;
        elDiv.appendChild(p);

        p = document.createElement("p");
        p.innerText = `chatRequestOptions:${JSON.stringify(this.chatRequestOptions)}`;
        elDiv.appendChild(p);

    }

}


/** @type {Me} */
let gMe;

function startme() {
    console.log("INFO:SimpleChat:StartMe:Starting...");
    gMe = new Me();
    for (let cid of gMe.defaultChatIds) {
        gMe.multiChat.new_chat_session(cid);
    }
    gMe.multiChat.setup_ui(gMe.defaultChatIds[0], true);
    gMe.multiChat.show_sessions();
}

document.addEventListener("DOMContentLoaded", startme);
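
The client above boils down to plain fetch() calls against the example server. As a rough, standalone sketch of the same flow (assuming a server listening at the gBaseURL used above, and using only fields that already appear in this file; error handling omitted):

    // Minimal sketch, not part of simplechat.js: one chat-mode and one completion-mode request.
    async function sketch_query(baseURL = "http://127.0.0.1:8080") {
        // chat/completions expects a messages array.
        let chatResp = await fetch(`${baseURL}/chat/completions`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({
                messages: [ { role: "user", content: "Hello" } ],
                temperature: 0.7,
                max_tokens: 1024,
            }),
        });
        console.log((await chatResp.json())["choices"][0]["message"]["content"]);

        // completions expects a flat prompt string.
        let compResp = await fetch(`${baseURL}/completions`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ prompt: "user: Hello", n_predict: 1024 }),
        });
        let body = await compResp.json();
        console.log(body["content"] ?? body["choices"]?.[0]?.["text"]);
    }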
@@ -102,7 +102,6 @@ struct slot_params {
     bool stream = true;
     bool cache_prompt = false; // remember the prompt to avoid reprocessing all prompt

-    uint32_t seed = -1; // RNG seed
     int32_t n_keep = 0; // number of tokens to keep from initial prompt
     int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
     int32_t n_predict = -1; // new tokens to predict
@@ -651,9 +650,6 @@ struct server_context {
     std::string system_prompt;
     std::vector<llama_token> system_tokens;

-    std::string name_user; // this should be the antiprompt
-    std::string name_assistant;
-
     // slots / clients
     std::vector<server_slot> slots;
     json default_generation_settings_for_props;
@@ -673,6 +669,15 @@ struct server_context {
             llama_free_model(model);
             model = nullptr;
         }
+
+        // Clear any sampling context
+        for (server_slot & slot : slots) {
+            if (slot.ctx_sampling != nullptr) {
+                llama_sampling_free(slot.ctx_sampling);
+            }
+        }
+
+        llama_batch_free(batch);
     }

     bool load_model(const gpt_params & params_) {
@@ -1014,7 +1019,7 @@ struct server_context {
                     sampler_names.emplace_back(sampler_name);
                 }
             }
-            slot.sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
+            slot.sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
         } else {
             slot.sparams.samplers_sequence = default_sparams.samplers_sequence;
         }
@@ -1098,15 +1103,11 @@ struct server_context {
         system_need_update = false;
     }

-    void system_prompt_set(const json & sys_props) {
-        system_prompt = sys_props.value("prompt", "");
-        name_user = sys_props.value("anti_prompt", "");
-        name_assistant = sys_props.value("assistant_name", "");
+    bool system_prompt_set(const std::string & sys_prompt) {
+        system_prompt = sys_prompt;

         LOG_VERBOSE("system prompt process", {
             {"system_prompt", system_prompt},
-            {"name_user", name_user},
-            {"name_assistant", name_assistant},
         });

         // release all slots
@@ -1115,6 +1116,7 @@ struct server_context {
         }

         system_need_update = true;
+        return true;
     }

     bool process_token(completion_token_output & result, server_slot & slot) {
@@ -1254,14 +1256,14 @@ struct server_context {
         std::vector<std::string> samplers_sequence;
         samplers_sequence.reserve(slot.sparams.samplers_sequence.size());
         for (const auto & sampler_type : slot.sparams.samplers_sequence) {
-            samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type));
+            samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
         }

         return json {
             {"n_ctx", slot.n_ctx},
             {"n_predict", slot.n_predict},
             {"model", params.model_alias},
-            {"seed", slot.params.seed},
+            {"seed", slot.sparams.seed},
             {"temperature", slot.sparams.temp},
             {"dynatemp_range", slot.sparams.dynatemp_range},
             {"dynatemp_exponent", slot.sparams.dynatemp_exponent},
@@ -1534,7 +1536,8 @@ struct server_context {
                 }

                 if (task.data.contains("system_prompt")) {
-                    system_prompt_set(task.data.at("system_prompt"));
+                    std::string sys_prompt = json_value(task.data, "system_prompt", std::string());
+                    system_prompt_set(sys_prompt);

                     for (server_slot & slot : slots) {
                         slot.n_past = 0;
@@ -1978,8 +1981,7 @@ struct server_context {
                     slot.state = SLOT_STATE_PROCESSING;
                     slot.command = SLOT_COMMAND_NONE;
                     slot.release();
-                    slot.print_timings();
-                    send_final_response(slot);
+                    send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
                     continue;
                 }
             } else {
@@ -2270,10 +2272,10 @@ struct server_context {

             const size_t n_probs = std::min(cur_p.size, (size_t) slot.sparams.n_probs);
             if (n_probs > 0) {
-                const size_t n_considered = slot.ctx_sampling->n_considered;
+                const size_t n_valid = slot.ctx_sampling->n_valid;

                 // Make sure at least n_probs top tokens are at the front of the vector:
-                if (slot.sparams.temp == 0.0f && n_probs > n_considered) {
+                if (slot.sparams.temp == 0.0f && n_probs > n_valid) {
                     llama_sample_top_k(ctx, &cur_p, n_probs, 0);
                 }

@@ -2289,7 +2291,7 @@ struct server_context {
                 for (size_t i = 0; i < n_probs; ++i) {
                     result.probs.push_back({
                         cur_p.data[i].id,
-                        i >= n_considered ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
+                        i >= n_valid ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
                     });
                 }
             }
@@ -2383,6 +2385,7 @@ static void server_print_usage(const char * argv0, const gpt_params & params, co
     printf("  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
     printf("  --host                    ip address to listen (default  (default: %s)\n", sparams.hostname.c_str());
     printf("  --port PORT               port to listen (default  (default: %d)\n", sparams.port);
+    printf("  --rpc SERVERS             comma separated list of RPC servers\n");
     printf("  --path PUBLIC_PATH        path from which to serve static files (default: disabled)\n");
     printf("  --api-key API_KEY         optional api key to enhance server security. If set, requests must include this key for access.\n");
     printf("  --api-key-file FNAME      path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
@@ -2435,6 +2438,12 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
                 break;
             }
             sparams.port = std::stoi(argv[i]);
+        } else if (arg == "--rpc") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.rpc_servers = argv[i];
         } else if (arg == "--host") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -2843,7 +2852,7 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
                 invalid_param = true;
                 break;
             }
-            if (!parse_kv_override(argv[i], params.kv_overrides)) {
+            if (!string_parse_kv_override(argv[i], params.kv_overrides)) {
                 fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
                 invalid_param = true;
                 break;
@@ -2918,7 +2927,7 @@ int main(int argc, char ** argv) {
     server_params_parse(argc, argv, sparams, params);

     if (!sparams.system_prompt.empty()) {
-        ctx_server.system_prompt_set(json::parse(sparams.system_prompt));
+        ctx_server.system_prompt_set(sparams.system_prompt);
     }

     if (params.model_alias == "unknown") {
@@ -3308,7 +3317,7 @@ int main(int argc, char ** argv) {
     const auto handle_slots_save = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
         json request_data = json::parse(req.body);
         std::string filename = request_data.at("filename");
-        if (!validate_file_name(filename)) {
+        if (!fs_validate_filename(filename)) {
             res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
             return;
         }
@@ -3338,7 +3347,7 @@ int main(int argc, char ** argv) {
     const auto handle_slots_restore = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
         json request_data = json::parse(req.body);
         std::string filename = request_data.at("filename");
-        if (!validate_file_name(filename)) {
+        if (!fs_validate_filename(filename)) {
             res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
             return;
         }
@@ -3414,8 +3423,7 @@ int main(int argc, char ** argv) {
     const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
         res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
         json data = {
-            { "user_name",                   ctx_server.name_user.c_str() },
-            { "assistant_name",              ctx_server.name_assistant.c_str() },
+            { "system_prompt",               ctx_server.system_prompt.c_str() },
             { "default_generation_settings", ctx_server.default_generation_settings_for_props },
             { "total_slots",                 ctx_server.params.n_parallel }
         };
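
With the server-side change above, the /props endpoint now reports the active system prompt instead of the removed user/assistant names. A minimal sketch of reading it from the existing SimpleChat front end (assuming the same gBaseURL; this helper is hypothetical and not part of the diff):

    // Hypothetical helper: query the example server's /props endpoint.
    async function sketch_fetch_props(baseURL = "http://127.0.0.1:8080") {
        let resp = await fetch(`${baseURL}/props`);
        let props = await resp.json();
        // Fields taken from the handle_props change above.
        console.log(props["system_prompt"], props["total_slots"], props["default_generation_settings"]);
        return props;
    }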
@@ -13,6 +13,7 @@ Feature: Results

   Scenario Outline: consistent results with same seed
     Given <n_slots> slots
+    And 1.0 temperature
     Then the server is starting
     Then the server is healthy

@@ -26,10 +27,12 @@ Feature: Results
     Examples:
       | n_slots |
       | 1       |
-      | 2       |
+      # FIXME: unified KV cache nondeterminism
+      # | 2     |

   Scenario Outline: different results with different seed
     Given <n_slots> slots
+    And 1.0 temperature
     Then the server is starting
     Then the server is healthy

@@ -71,11 +74,45 @@ Feature: Results
     Examples:
       | n_parallel | temp |
       | 1          | 0.0  |
-      | 2          | 0.0  |
-      | 4          | 0.0  |
       | 1          | 1.0  |
-      # FIXME: These tests fail on master. The problem seems to be the unified KV cache.
+      # FIXME: unified KV cache nondeterminism
       # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
-      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574 .
+      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574
+      # and https://github.com/ggerganov/llama.cpp/pull/7347 .
+      # | 2          | 0.0  |
+      # | 4          | 0.0  |
       # | 2          | 1.0  |
       # | 4          | 1.0  |

+  Scenario Outline: consistent token probs with same seed and prompt
+    Given <n_slots> slots
+    And <n_kv> KV cache size
+    And 1.0 temperature
+    And <n_predict> max tokens to predict
+    Then the server is starting
+    Then the server is healthy
+
+    Given 1 prompts "The meaning of life is" with seed 42
+    And concurrent completion requests
+    # Then the server is busy # Not all slots will be utilized.
+    Then the server is idle
+    And all slots are idle
+
+    Given <n_parallel> prompts "The meaning of life is" with seed 42
+    And concurrent completion requests
+    # Then the server is busy # Not all slots will be utilized.
+    Then the server is idle
+    And all slots are idle
+
+    Then all token probabilities are equal
+    Examples:
+      | n_slots | n_kv | n_predict | n_parallel |
+      | 4       | 1024 | 1         | 1          |
+      # FIXME: unified KV cache nondeterminism
+      # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
+      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574
+      # and https://github.com/ggerganov/llama.cpp/pull/7347 .
+      # | 4       | 1024 | 1         | 4          |
+      # | 4       | 1024 | 100       | 1          |
+      # This test still fails even the above patches; the first token probabilities are already different.
+      # | 4       | 1024 | 100       | 4          |
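
The scenarios above pin down the intended behaviour: with an identical seed, prompt and temperature, a single slot should return identical completions (and, in the new scenario, identical per-token probabilities), while different seeds should diverge. A rough client-side equivalent of the same-seed check, written against the SimpleChat endpoints used earlier (sketch only; the seed and n_probs request fields are assumed to be accepted by the /completions endpoint, as the test suite relies on):

    // Sketch: issue the same seeded request twice and compare the results.
    async function sketch_same_seed_check(baseURL = "http://127.0.0.1:8080") {
        const ask = () => fetch(`${baseURL}/completions`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({
                prompt: "The meaning of life is",
                seed: 42,
                temperature: 1.0,
                n_predict: 32,
                n_probs: 2,
            }),
        }).then(r => r.json());
        const a = await ask();
        const b = await ask();
        console.log("same content:", a["content"] === b["content"]);
        return a["content"] === b["content"];
    }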
@@ -23,6 +23,7 @@ from prometheus_client import parser
 def step_server_config(context, server_fqdn, server_port):
     context.server_fqdn = server_fqdn
     context.server_port = int(server_port)
+    context.n_threads = None
     context.n_gpu_layer = None
     if 'PORT' in os.environ:
         context.server_port = int(os.environ['PORT'])
@@ -109,6 +110,11 @@ def step_n_gpu_layer(context, ngl):
     context.n_gpu_layer = ngl


+@step('{n_threads:d} threads')
+def step_n_threads(context, n_threads):
+    context.n_thread = n_threads
+
+
 @step('{draft:d} as draft')
 def step_draft(context, draft):
     context.draft = draft
@@ -193,7 +199,7 @@ async def step_wait_for_the_server_to_be_started(context, expecting_status):

         case 'ready' | 'idle':
             await wait_for_health_status(context, context.base_url, 200, 'ok',
-                                         timeout=10,
+                                         timeout=30,
                                          params={'fail_on_no_slot': 0, 'include_slots': 0},
                                          slots_idle=context.n_slots,
                                          slots_processing=0,
@@ -274,13 +280,22 @@ async def step_predictions_equal(context):

 @step('all predictions are different')
 @async_run_until_complete
-async def step_predictions_equal(context):
+async def step_predictions_different(context):
     n_completions = await gather_tasks_results(context)
     assert n_completions >= 2, "need at least 2 completions"
     assert_all_predictions_different(context.tasks_result)
     context.tasks_result = []


+@step('all token probabilities are equal')
+@async_run_until_complete
+async def step_token_probabilities_equal(context):
+    n_completions = await gather_tasks_results(context)
+    assert n_completions >= 2, "need at least 2 completions"
+    assert_all_token_probabilities_equal(context.tasks_result)
+    context.tasks_result = []
+
+
 @step('the completion is truncated')
 def step_assert_completion_truncated(context):
     step_assert_completion_truncated(context, '')
@@ -868,7 +883,8 @@ async def request_completion(prompt,
                                   "cache_prompt": cache_prompt,
                                   "id_slot": id_slot,
                                   "seed": seed if seed is not None else 42,
-                                  "temperature": temperature if temperature is not None else "0.8f",
+                                  "temperature": temperature if temperature is not None else 0.8,
+                                  "n_probs": 2,
                               },
                               headers=headers,
                               timeout=3600) as response:
@@ -887,6 +903,7 @@ async def oai_chat_completions(user_prompt,
                                base_path,
                                async_client,
                                debug=False,
+                               temperature=None,
                                model=None,
                                n_predict=None,
                                enable_streaming=None,
@@ -913,7 +930,8 @@ async def oai_chat_completions(user_prompt,
         "model": model,
         "max_tokens": n_predict,
         "stream": enable_streaming,
-        "seed": seed
+        "temperature": temperature if temperature is not None else 0.0,
+        "seed": seed,
     }
     if response_format is not None:
         payload['response_format'] = response_format
@@ -939,7 +957,7 @@ async def oai_chat_completions(user_prompt,
                 while event_received:
                     event_received = False
                     async for line_in_bytes in response.content:
-                        line = line_in_bytes.decode('utf8')
+                        line = line_in_bytes.decode('utf-8')
                         line = line.rstrip('\n').rstrip('\r')
                         if line == '':
                             continue
@@ -978,7 +996,8 @@ async def oai_chat_completions(user_prompt,
                 max_tokens=n_predict,
                 stream=enable_streaming,
                 response_format=payload.get('response_format'),
-                seed=seed
+                seed=seed,
+                temperature=payload['temperature']
             )
         except openai.error.AuthenticationError as e:
             if expect_api_error is not None and expect_api_error:
@@ -1120,6 +1139,23 @@ def assert_all_predictions_different(completion_responses):
             assert content_i != content_j, "contents not different"


+def assert_all_token_probabilities_equal(completion_responses):
+    n_predict = len(completion_responses[0]['completion_probabilities'])
+    if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
+        for pos in range(n_predict):
+            for i, response_i in enumerate(completion_responses):
+                probs_i = response_i['completion_probabilities'][pos]['probs']
+                print(f"pos {pos}, probs {i}: {probs_i}")
+    for pos in range(n_predict):
+        for i, response_i in enumerate(completion_responses):
+            probs_i = response_i['completion_probabilities'][pos]['probs']
+            for j, response_j in enumerate(completion_responses):
+                if i == j:
+                    continue
+                probs_j = response_j['completion_probabilities'][pos]['probs']
+                assert probs_i == probs_j, "contents not equal"
+
+
 async def gather_tasks_results(context):
     n_tasks = len(context.concurrent_tasks)
     if context.debug:
@@ -1258,6 +1294,8 @@ def start_server_background(context):
         server_args.extend(['--batch-size', context.n_batch])
     if context.n_ubatch:
         server_args.extend(['--ubatch-size', context.n_ubatch])
+    if context.n_threads:
+        server_args.extend(['--threads', context.threads])
     if context.n_gpu_layer:
         server_args.extend(['--n-gpu-layers', context.n_gpu_layer])
     if context.draft is not None:
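
The new assertion compares the per-position probability lists returned alongside each completion (requested via the "n_probs": 2 field added above). The response shape it walks is roughly the following (a sketch of the example server's output as this test consumes it; field names beyond those indexed by the assertion are illustrative only):

    // Sketch of the shape assert_all_token_probabilities_equal() indexes into:
    // response['completion_probabilities'][pos]['probs'] -> list of candidate tokens with probabilities.
    const exampleCompletionResponse = {
        content: " to be happy",
        completion_probabilities: [
            { probs: [ { tok_str: " to", prob: 0.61 }, { tok_str: " a", prob: 0.17 } ] },
            { probs: [ { tok_str: " be", prob: 0.72 }, { tok_str: " do", prob: 0.09 } ] },
        ],
    };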
Some files were not shown because too many files have changed in this diff.