Merge branch 'master' into patch-1

Commit 15c4e11574

21 changed files with 1064 additions and 261 deletions
.devops/full.Dockerfile (new file, 17 lines)

@@ -0,0 +1,17 @@
ARG UBUNTU_VERSION=22.04

FROM ubuntu:$UBUNTU_VERSION as build

RUN apt-get update && \
    apt-get install -y build-essential python3 python3-pip

RUN pip install --upgrade pip setuptools wheel \
    && pip install torch torchvision torchaudio sentencepiece numpy

WORKDIR /app

COPY . .

RUN make

ENTRYPOINT ["/app/.devops/tools.sh"]
.devops/main.Dockerfile (new file, 18 lines)

@@ -0,0 +1,18 @@
ARG UBUNTU_VERSION=22.04

FROM ubuntu:$UBUNTU_VERSION as build

RUN apt-get update && \
    apt-get install -y build-essential

WORKDIR /app

COPY . .

RUN make

FROM ubuntu:$UBUNTU_VERSION as runtime

COPY --from=build /app/main /main

ENTRYPOINT [ "/main" ]
.devops/tools.sh (new executable file, 46 lines)

@@ -0,0 +1,46 @@
#!/bin/bash
set -e

# Read the first argument into a variable
arg1="$1"

# Shift the arguments to remove the first one
shift

# Join the remaining arguments into a single string
arg2="$@"

if [[ $arg1 == '--convert' || $arg1 == '-c' ]]; then
    python3 ./convert-pth-to-ggml.py $arg2
elif [[ $arg1 == '--quantize' || $arg1 == '-q' ]]; then
    ./quantize $arg2
elif [[ $arg1 == '--run' || $arg1 == '-r' ]]; then
    ./main $arg2
elif [[ $arg1 == '--download' || $arg1 == '-d' ]]; then
    python3 ./download-pth.py $arg2
elif [[ $arg1 == '--all-in-one' || $arg1 == '-a' ]]; then
    echo "Downloading model..."
    python3 ./download-pth.py "$1" "$2"
    echo "Converting PTH to GGML..."
    for i in `ls $1/$2/ggml-model-f16.bin*`; do
        if [ -f "${i/f16/q4_0}" ]; then
            echo "Skip model quantization, it already exists: ${i/f16/q4_0}"
        else
            echo "Converting PTH to GGML: $i into ${i/f16/q4_0}..."
            ./quantize "$i" "${i/f16/q4_0}" 2
        fi
    done
else
    echo "Unknown command: $arg1"
    echo "Available commands: "
    echo "  --run (-r): Run a model previously converted into ggml"
    echo "              ex: -m /models/7B/ggml-model-q4_0.bin -p \"Building a website can be done in 10 simple steps:\" -n 512"
    echo "  --convert (-c): Convert a llama model into ggml"
    echo "                  ex: \"/models/7B/\" 1"
    echo "  --quantize (-q): Optimize with quantization process ggml"
    echo "                   ex: \"/models/7B/ggml-model-f16.bin\" \"/models/7B/ggml-model-q4_0.bin\" 2"
    echo "  --download (-d): Download original llama model from CDN: https://agi.gpt4.org/llama/"
    echo "                   ex: \"/models/\" 7B"
    echo "  --all-in-one (-a): Execute --download, --convert & --quantize"
    echo "                     ex: \"/models/\" 7B"
fi
.dockerignore (new file, 24 lines)

@@ -0,0 +1,24 @@
*.o
*.a
.cache/
.vs/
.vscode/
.DS_Store

build/
build-em/
build-debug/
build-release/
build-static/
build-no-accel/
build-sanitize-addr/
build-sanitize-thread/

models/*

/main
/quantize

arm_neon.h
compile_commands.json
Dockerfile
.github/workflows/build.yml (vendored, 102 lines changed)

@@ -1,8 +1,42 @@
name: CI
on: [push, pull_request]

on:
  workflow_dispatch: # allows manual triggering
    inputs:
      create_release:
        description: 'Create new release'
        required: true
        type: boolean
  push:
    paths: ['.github/workflows/**', 'CMakeLists.txt', 'Makefile', '**.h', '*.c', '**.cpp']
  pull_request:
    types: [opened, synchronize, edited, reopened, review_requested, ready_for_review]
    paths: ['CMakeLists.txt', 'Makefile', '**.h', '*.c', '**.cpp']

env:
  BRANCH_NAME: ${{ github.head_ref || github.ref_name }}

jobs:
  ubuntu-latest:
  ubuntu-latest-make:
    runs-on: ubuntu-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential

      - name: Build
        id: make_build
        run: |
          make

  ubuntu-latest-cmake:
    runs-on: ubuntu-latest

    steps:

@@ -15,10 +49,31 @@ jobs:
          sudo apt-get install build-essential

      - name: Build
        run: |
          mkdir build
          cd build
          cmake ..
          cmake --build . --config Release

  macOS-latest-make:
    runs-on: macos-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        run: |
          brew update

      - name: Build
        id: make_build
        run: |
          make

  macOS-latest:
  macOS-latest-cmake:
    runs-on: macOS-latest

    steps:

@@ -31,22 +86,59 @@ jobs:

      - name: Build
        run: |
          make
          mkdir build
          cd build
          cmake ..
          cmake --build . --config Release

  windows-latest:
  windows-latest-cmake:
    runs-on: windows-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
          cmake ..
          cmake --build . --config Release

      - name: Get commit hash
        id: commit
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: pr-mpt/actions-commit-hash@v2

      - name: Pack artifacts
        id: pack_artifacts
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        run: |
          7z a llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-x64.zip .\build\Release\*

      - name: Create release
        id: create_release
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: zendesk/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}

      - name: Upload release
        id: upload_release
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: .\llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-x64.zip
          asset_name: llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-x64.zip
          asset_content_type: application/octet-stream

#  ubuntu-latest-gcc:
#    runs-on: ubuntu-latest
#
.github/workflows/docker.yml (vendored, new file, 61 lines)

@@ -0,0 +1,61 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# GitHub recommends pinning actions to a commit SHA.
# To get a newer version, you will need to update the SHA.
# You can also reference a tag or branch, but the action may change without warning.

name: Publish Docker image

on:
  pull_request:
  push:
    branches:
      - master

jobs:
  push_to_registry:
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
    env:
      COMMIT_SHA: ${{ github.sha }}
    strategy:
      matrix:
        config:
          - { tag: "light", dockerfile: ".devops/main.Dockerfile" }
          - { tag: "full", dockerfile: ".devops/full.Dockerfile" }
    steps:
      - name: Check out the repo
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push Docker image (versioned)
        if: github.event_name == 'push'
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
          file: ${{ matrix.config.dockerfile }}

      - name: Build and push Docker image (tagged)
        uses: docker/build-push-action@v4
        with:
          context: .
          push: ${{ github.event_name == 'push' }}
          tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}"
          file: ${{ matrix.config.dockerfile }}
.gitignore (vendored, 4 lines changed)

@@ -18,6 +18,10 @@ models/*

/main
/quantize
/result

arm_neon.h
compile_commands.json

.envrc
.direnv/
CMakeLists.txt

@@ -6,6 +6,10 @@ set(CMAKE_CXX_STANDARD_REQUIRED true)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED true)

set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)


if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")

@@ -200,6 +204,7 @@ add_executable(llama main.cpp)
add_executable(quantize quantize.cpp)

target_link_libraries(ggml PUBLIC Threads::Threads)
target_link_libraries(ggml PRIVATE ${LLAMA_EXTRA_LIBS})
target_include_directories(ggml PUBLIC .)
target_link_libraries(llama ggml utils)
target_link_libraries(quantize ggml utils)
target_link_libraries(quantize ggml utils)
README.md (128 lines changed)

@@ -3,10 +3,11 @@
[](https://github.com/ggerganov/llama.cpp/actions)
[](https://opensource.org/licenses/MIT)

Inference of [Facebook's LLaMA](https://github.com/facebookresearch/llama) model in pure C/C++
Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++

**Hot topics:**

- [Added Alpaca support](https://github.com/ggerganov/llama.cpp#instruction-mode-with-alpaca)
- Cache input prompts for faster initialization: https://github.com/ggerganov/llama.cpp/issues/64
- Create a `llama.cpp` logo: https://github.com/ggerganov/llama.cpp/issues/105


@@ -31,13 +32,14 @@ Supported platforms:
- [X] Mac OS
- [X] Linux
- [X] Windows (via CMake)
- [X] Docker

---

Here is a typical run using LLaMA-7B:

```java
make -j && ./main -m ./models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -t 8 -n 512
make -j && ./main -m ./models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
I llama.cpp build info:
I UNAME_S:  Darwin
I UNAME_P:  arm

@@ -148,12 +150,24 @@ python3 convert-pth-to-ggml.py models/7B/ 1
./quantize.sh 7B

# run the inference
./main -m ./models/7B/ggml-model-q4_0.bin -t 8 -n 128
./main -m ./models/7B/ggml-model-q4_0.bin -n 128
```

Currently, it's best to use Python 3.9 or Python 3.10, as `sentencepiece` has not yet published a wheel for Python 3.11.

When running the larger models, make sure you have enough disk space to store all the intermediate files.

TODO: add model disk/mem requirements
### Memory/Disk Requirements

As the models are currently fully loaded into memory, you will need adequate disk space to save them
and sufficient RAM to load them. At the moment, memory and disk requirements are the same.

| model | original size | quantized size (4-bit) |
|-------|---------------|------------------------|
| 7B    | 13 GB         | 3.9 GB                 |
| 13B   | 24 GB         | 7.8 GB                 |
| 30B   | 60 GB         | 19.5 GB                |
| 65B   | 120 GB        | 38.5 GB                |

### Interactive mode


@@ -162,35 +176,115 @@ In this mode, you can always interrupt generation by pressing Ctrl+C and enter o

Here is an example few-shot interaction, invoked with the command
```
./main -m ./models/13B/ggml-model-q4_0.bin -t 8 -n 256 --repeat_penalty 1.0 --color -i -r "User:" \
-p \
"Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.

User: Hello, Bob.
Bob: Hello. How may I help you today?
User: Please tell me the largest city in Europe.
Bob: Sure. The largest city in Europe is Moscow, the capital of Russia.
User:"
./main -m ./models/13B/ggml-model-q4_0.bin -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt

```
Note the use of `--color` to distinguish between user input and generated text.



### Instruction mode with Alpaca

First, download the `ggml` Alpaca model into the `./models` folder:

```
# use one of these
# NOTE: these are copied from the alpaca.cpp repo - not sure how long these will work
# TODO: add a script to simplify the download
curl -o ggml-alpaca-7b-q4.bin -C - https://gateway.estuary.tech/gw/ipfs/QmQ1bf2BTnYxq73MFJWu1B7bQ2UD6qG7D7YDCxhTndVkPC
curl -o ggml-alpaca-7b-q4.bin -C - https://ipfs.io/ipfs/QmQ1bf2BTnYxq73MFJWu1B7bQ2UD6qG7D7YDCxhTndVkPC
curl -o ggml-alpaca-7b-q4.bin -C - https://cloudflare-ipfs.com/ipfs/QmQ1bf2BTnYxq73MFJWu1B7bQ2UD6qG7D7YDCxhTndVkPC
```

Now run the `main` tool like this:

```
./main -m ./models/ggml-alpaca-7b-q4.bin --color -f ./prompts/alpaca.txt -ins
```

Sample run:

```
== Running in interactive mode. ==
 - Press Ctrl+C to interject at any time.
 - Press Return to return control to LLaMa.
 - If you want to submit another line, end your input in '\'.

 Below is an instruction that describes a task. Write a response that appropriately completes the request.

> How many letters are there in the English alphabet?
There 26 letters in the English Alphabet
> What is the most common way of transportation in Amsterdam?
The majority (54%) are using public transit. This includes buses, trams and metros with over 100 lines throughout the city which make it very accessible for tourists to navigate around town as well as locals who commute by tram or metro on a daily basis
> List 5 words that start with "ca".
cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
>
```

### Android

You can easily run `llama.cpp` on Android device with [termux](https://play.google.com/store/apps/details?id=com.termux).
First, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake:
```
$ mkdir build-android
$ cd build-android
$ export NDK=<your_ndk_directory>
$ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
$ make
```
Install [termux](https://play.google.com/store/apps/details?id=com.termux) on your device and run `termux-setup-storage` to get access to your SD card.
Finally, copy the `llama` binary and the model files to your device storage. Here is a demo of an interactive session running on Pixel 5 phone:

https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4

### Docker

#### Prerequisites
* Docker must be installed and running on your system.
* Create a folder to store big models & intermediate files (in ex. im using /llama/models)

#### Images
We have two Docker images available for this project:

1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization.
2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file.

#### Usage

The easiest way to download the models, convert them to ggml and optimize them is with the --all-in-one command which includes the full docker image.

```bash
docker run -v /llama/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-one "/models/" 7B
```

On complete, you are ready to play!

```bash
docker run -v /llama/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
```

or with light image:

```bash
docker run -v /llama/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
```

## Limitations

- We don't know yet how much the quantization affects the quality of the generated text
- Probably the token sampling can be improved
- The Accelerate framework is actually currently unused since I found that for tensor shapes typical for the Decoder,
  there is no benefit compared to the ARM_NEON intrinsics implementation. Of course, it's possible that I simlpy don't
  there is no benefit compared to the ARM_NEON intrinsics implementation. Of course, it's possible that I simply don't
  know how to utilize it properly. But in any case, you can even disable it with `LLAMA_NO_ACCELERATE=1 make` and the
  performance will be the same, since no BLAS calls are invoked by the current implementation

### Contributing

- Contributors can open PRs
- Collaborators can push to branches in the `llama.cpp` repo
- Collaborators can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch
- Collaborators will be invited based on contributions
- Any help with managing issues and PRs is very appreciated!
- Make sure to read this: [Inference at the edge](https://github.com/ggerganov/llama.cpp/discussions/205)

### Coding guidelines


@@ -200,7 +294,3 @@ Note the use of `--color` to distinguish between user input and generated text.
- There are no strict rules for the code style, but try to follow the patterns in the code (indentation, spaces, etc.). Vertical alignment makes things more readable and easier to batch edit
- Clean-up any trailing whitespaces, use 4 spaces indentation, brackets on same line, `void * ptr`, `int & a`
- See [good first issues](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions

### Misc

- Practice your C++ typing skills: https://typing-battles.ggerganov.com
convert-pth-to-ggml.py

@@ -16,7 +16,7 @@
# At the start of the ggml file we write the model parameters
# and vocabulary.
#

import argparse
import sys
import json
import struct

@@ -24,132 +24,81 @@ import numpy as np
import torch
from sentencepiece import SentencePieceProcessor

if len(sys.argv) < 3:
    print("Usage: convert-ckpt-to-ggml.py dir-model ftype\n")
    print("  ftype == 0 -> float32")
    print("  ftype == 1 -> float16")
    sys.exit(1)
def parse_args():

# output in the same directory as the model
dir_model = sys.argv[1]

fname_hparams   = sys.argv[1] + "/params.json"
fname_tokenizer = sys.argv[1] + "/../tokenizer.model"
    parser = argparse.ArgumentParser(description='Convert a LLaMA model checkpoint to a ggml compatible file')
    parser.add_argument('dir_model', help='directory containing the model checkpoint')
    parser.add_argument('ftype', type=int, choices=[0, 1], default=1, help='file type (0: float32, 1: float16)')
    return parser.parse_args()

def get_n_parts(dim):
    if dim == 4096:
        return 1
    elif dim == 5120:
        return 2
    elif dim == 6656:
        return 4
    elif dim == 8192:
        return 8
    else:
        print("Invalid dim: " + str(dim))

    mappings = {4096: 1, 5120: 2, 6656: 4, 8192: 8}
    n_parts = mappings.get(dim)
    if n_parts is None:
        print(f"Invalid dim: {dim}")
        sys.exit(1)

# possible data types
# ftype == 0 -> float32
# ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]
    print(f"n_parts = {n_parts}\n")
    return n_parts

ftype = 1
if len(sys.argv) > 2:
    ftype = int(sys.argv[2])
    if ftype < 0 or ftype > 1:
        print("Invalid ftype: " + str(ftype))
        sys.exit(1)
    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"
def load_hparams_and_tokenizer(dir_model):

with open(fname_hparams, "r") as f:
    hparams = json.load(f)
    fname_hparams = f"{dir_model}/params.json"
    fname_tokenizer = f"{dir_model}/../tokenizer.model"

tokenizer = SentencePieceProcessor(fname_tokenizer)
    with open(fname_hparams, "r") as f:
        hparams = json.load(f)
        print(hparams)

hparams.update({"vocab_size": tokenizer.vocab_size()})
    tokenizer = SentencePieceProcessor(fname_tokenizer)
    hparams.update({"vocab_size": tokenizer.vocab_size()})

n_parts = get_n_parts(hparams["dim"])
    return hparams, tokenizer

print(hparams)
print('n_parts = ', n_parts)
def write_header(fout, hparams, ftype):

for p in range(n_parts):
    print('Processing part ', p)
    keys = ["vocab_size", "dim", "multiple_of", "n_heads", "n_layers"]
    values = [
        0x67676d6c,  # magic: ggml in hex
        *[hparams[key] for key in keys],
        hparams["dim"] // hparams["n_heads"],  # rot (obsolete)
        ftype
    ]
    fout.write(struct.pack("i" * len(values), *values))

    #fname_model = sys.argv[1] + "/consolidated.00.pth"
    fname_model = sys.argv[1] + "/consolidated.0" + str(p) + ".pth"
    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"
    if (p > 0):
        fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" + "." + str(p)
def write_tokens(fout, tokenizer):

    model = torch.load(fname_model, map_location="cpu")

    fout = open(fname_out, "wb")

    fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex
    fout.write(struct.pack("i", hparams["vocab_size"]))
    fout.write(struct.pack("i", hparams["dim"]))
    fout.write(struct.pack("i", hparams["multiple_of"]))
    fout.write(struct.pack("i", hparams["n_heads"]))
    fout.write(struct.pack("i", hparams["n_layers"]))
    fout.write(struct.pack("i", hparams["dim"] // hparams["n_heads"])) # rot (obsolete)
    fout.write(struct.pack("i", ftype))

    # Is this correct??
    for i in range(32000):
    for i in range(tokenizer.vocab_size()):
        if tokenizer.is_unknown(i):
            # "<unk>" token (translated as ??)
            text = " \u2047 ".encode("utf-8")
            fout.write(struct.pack("i", len(text)))
            fout.write(text)
        elif tokenizer.is_control(i):
            # "<s>"/"</s>" tokens
            fout.write(struct.pack("i", 0))
            text = b""
        elif tokenizer.is_byte(i):
            # "<U+XX>" tokens (which may be invalid UTF-8)
            piece = tokenizer.id_to_piece(i)
            if len(piece) != 6:
                print("Invalid token: " + piece)
                print(f"Invalid token: {piece}")
                sys.exit(1)
            byte_value = int(piece[3:-1], 16)
            fout.write(struct.pack("i", 1))
            fout.write(struct.pack("B", byte_value))
            text = struct.pack("B", byte_value)
        else:
            # normal token. Uses U+2581 (LOWER ONE EIGHTH BLOCK) to represent spaces.
            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
            fout.write(struct.pack("i", len(text)))
            fout.write(text)
        fout.write(struct.pack("i", len(text)))
        fout.write(text)

    for k, v in model.items():
        name = k
        shape = v.shape
def process_and_write_variables(fout, model, ftype):

        # skip layers.X.attention.inner_attention.rope.freqs
        if name[-5:] == "freqs":
    for name, datao in model.items():

        if name.endswith("freqs"):
            continue

        print("Processing variable: " + name + " with shape: ", shape, " and type: ", v.dtype)
        shape = datao.shape

        #data = tf.train.load_variable(dir_model, name).squeeze()
        data = v.numpy().squeeze()
        n_dims = len(data.shape);
        print(f"Processing variable: {name} with shape: {shape} and type: {datao.dtype}")

        # for efficiency - transpose some matrices
        # "model/h.*/attn/c_attn/w"
        # "model/h.*/attn/c_proj/w"
        # "model/h.*/mlp/c_fc/w"
        # "model/h.*/mlp/c_proj/w"
        #if name[-14:] == "/attn/c_attn/w" or \
        #   name[-14:] == "/attn/c_proj/w" or \
        #   name[-11:] == "/mlp/c_fc/w" or \
        #   name[-13:] == "/mlp/c_proj/w":
        #    print("  Transposing")
        #    data = data.transpose()

        dshape = data.shape
        data = datao.numpy().squeeze()
        n_dims = len(shape)

        # default type is fp16
        ftype_cur = 1
@ -160,18 +109,40 @@ for p in range(n_parts):
|
|||
|
||||
# header
|
||||
sname = name.encode('utf-8')
|
||||
fout.write(struct.pack("iii", n_dims, len(sname), ftype_cur))
|
||||
for i in range(n_dims):
|
||||
fout.write(struct.pack("i", dshape[n_dims - 1 - i]))
|
||||
fout.write(sname);
|
||||
fout.write(struct.pack("iii", len(data.shape), len(sname), ftype_cur))
|
||||
for dim in reversed(data.shape):
|
||||
fout.write(struct.pack("i", dim))
|
||||
fout.write(sname)
|
||||
|
||||
# data
|
||||
# data output to file
|
||||
data.tofile(fout)
|
||||
|
||||
# I hope this deallocates the memory ..
|
||||
model = None
|
||||
def main():
|
||||
|
||||
fout.close()
|
||||
args = parse_args()
|
||||
dir_model = args.dir_model
|
||||
ftype = args.ftype
|
||||
ftype_str = ["f32", "f16"]
|
||||
|
||||
print("Done. Output file: " + fname_out + ", (part ", p, ")")
|
||||
print("")
|
||||
hparams, tokenizer = load_hparams_and_tokenizer(dir_model)
|
||||
n_parts = get_n_parts(hparams["dim"])
|
||||
|
||||
for p in range(n_parts):
|
||||
|
||||
print(f"Processing part {p}\n")
|
||||
|
||||
fname_model = f"{dir_model}/consolidated.0{p}.pth"
|
||||
fname_out = f"{dir_model}/ggml-model-{ftype_str[ftype]}.bin{'' if p == 0 else '.' + str(p)}"
|
||||
|
||||
model = torch.load(fname_model, map_location="cpu")
|
||||
|
||||
with open(fname_out, "wb") as fout:
|
||||
write_header(fout, hparams, ftype)
|
||||
write_tokens(fout, tokenizer)
|
||||
process_and_write_variables(fout, model, ftype)
|
||||
|
||||
del model
|
||||
print(f"Done. Output file: {fname_out}, (part {p})\n")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
download-pth.py (new file, 66 lines)

@@ -0,0 +1,66 @@
import os
import sys
from tqdm import tqdm
import requests

if len(sys.argv) < 3:
    print("Usage: download-pth.py dir-model model-type\n")
    print("  model-type: Available models 7B, 13B, 30B or 65B")
    sys.exit(1)

modelsDir = sys.argv[1]
model = sys.argv[2]

num = {
    "7B": 1,
    "13B": 2,
    "30B": 4,
    "65B": 8,
}

if model not in num:
    print(f"Error: model {model} is not valid, provide 7B, 13B, 30B or 65B")
    sys.exit(1)

print(f"Downloading model {model}")

files = ["checklist.chk", "params.json"]

for i in range(num[model]):
    files.append(f"consolidated.0{i}.pth")

resolved_path = os.path.abspath(os.path.join(modelsDir, model))
os.makedirs(resolved_path, exist_ok=True)

for file in files:
    dest_path = os.path.join(resolved_path, file)

    if os.path.exists(dest_path):
        print(f"Skip file download, it already exists: {file}")
        continue

    url = f"https://agi.gpt4.org/llama/LLaMA/{model}/{file}"
    response = requests.get(url, stream=True)
    with open(dest_path, 'wb') as f:
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=file) as t:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    t.update(len(chunk))

files2 = ["tokenizer_checklist.chk", "tokenizer.model"]
for file in files2:
    dest_path = os.path.join(modelsDir, file)

    if os.path.exists(dest_path):
        print(f"Skip file download, it already exists: {file}")
        continue

    url = f"https://agi.gpt4.org/llama/LLaMA/{file}"
    response = requests.get(url, stream=True)
    with open(dest_path, 'wb') as f:
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=file) as t:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    t.update(len(chunk))
flake.lock (generated, new file, 43 lines)

@@ -0,0 +1,43 @@
{
  "nodes": {
    "flake-utils": {
      "locked": {
        "lastModified": 1676283394,
        "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1678470307,
        "narHash": "sha256-OEeMUr3ueLIXyW/OaFUX5jUdimyQwMg/7e+/Q0gC/QE=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "0c4800d579af4ed98ecc47d464a5e7b0870c4b1f",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, 48 lines)

@@ -0,0 +1,48 @@
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
  };
  outputs = { self, nixpkgs, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = import nixpkgs {
          inherit system;
        };
        llama-python = pkgs.python310.withPackages (ps: with ps; [
          torch
          numpy
          sentencepiece
        ]);
      in
      {
        packages.default = pkgs.stdenv.mkDerivation {
          name = "llama.cpp";
          src = ./.;
          nativeBuildInputs = with pkgs; [ cmake ];
          buildInputs = with pkgs; lib.optionals stdenv.isDarwin [
            darwin.apple_sdk.frameworks.Accelerate
          ];
          cmakeFlags = with pkgs; lib.optionals (system == "aarch64-darwin") [
            "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1"
          ];
          installPhase = ''
            mkdir -p $out/bin
            mv llama $out/bin/llama
            mv quantize $out/bin/quantize
            echo "#!${llama-python}/bin/python" > $out/bin/convert-pth-to-ggml
            cat ${./convert-pth-to-ggml.py} >> $out/bin/convert-pth-to-ggml
            chmod +x $out/bin/convert-pth-to-ggml
          '';
        };
        devShells.default = pkgs.mkShell {
          packages = with pkgs; [
            cmake
            llama-python
          ] ++ lib.optionals stdenv.isDarwin [
            darwin.apple_sdk.frameworks.Accelerate
          ];
        };
      }
    );
}
ggml.c (319 lines changed)

@@ -364,7 +364,7 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
#if __AVX2__
// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
inline __m256i bytesFromNibbles( const uint8_t* rsi )
static inline __m256i bytesFromNibbles( const uint8_t* rsi )
{
    // Load 16 bytes from memory
    __m128i tmp = _mm_loadu_si128( ( const __m128i* )rsi );

@@ -381,7 +381,7 @@ inline __m256i bytesFromNibbles( const uint8_t* rsi )
    return bytes;
}

inline __m128i packNibbles( __m256i bytes )
static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );

@@ -607,10 +607,11 @@ void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
    assert(k % QK == 0);

    const int nb = k / QK;
    const size_t bs = 2*sizeof(float) + QK/2;

    float * restrict pm = (float *) (y);
    float * restrict pd = (float *) (pm + nb);
    uint8_t * restrict pb = (uint8_t *) (pd + nb);
    uint8_t * restrict pd = ((uint8_t *)y + 0*bs);
    uint8_t * restrict pm = ((uint8_t *)y + 0*bs + sizeof(float));
    uint8_t * restrict pb = ((uint8_t *)y + 0*bs + 2*sizeof(float));

    uint8_t pp[QK/2];

@@ -627,8 +628,10 @@ void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
        const float d = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        pm[i] = min;
        pd[i] = d;
        *(float *)pm = min;
        *(float *)pd = d;
        pm += bs;
        pd += bs;

        for (int l = 0; l < QK; l += 2) {
            const float v0 = (x[i*QK + l + 0] - min)*id;

@@ -643,7 +646,8 @@ void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
            pp[l/2] = vi0 | (vi1 << 4);
        }

        memcpy(pb + i*QK/2, pp, sizeof(pp));
        memcpy(pb, pp, sizeof(pp));
        pb += bs;
    }
}

@@ -687,16 +691,17 @@ void dequantize_row_q4_1(const void * restrict x, float * restrict y, int k) {
    assert(k % QK == 0);

    const int nb = k / QK;
    const size_t bs = 2*sizeof(float) + QK/2;

    const float * restrict pm = (const float *) (x);
    const float * restrict pd = (const float *) (pm + nb);
    const uint8_t * restrict pb = (const uint8_t *) (pd + nb);
    const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs);
    const uint8_t * restrict pm = ((const uint8_t *)x + 0*bs + sizeof(float));
    const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + 2*sizeof(float));

    for (int i = 0; i < nb; i++) {
        const float m = pm[i];
        const float d = pd[i];
        const float d = *(const float *) (pd + i*bs);
        const float m = *(const float *) (pm + i*bs);

        const uint8_t * restrict pp = pb + i*QK/2;
        const uint8_t * restrict pp = pb + i*bs;

        for (int l = 0; l < QK; l += 2) {
            const uint8_t vi = pp[l/2];
@ -1359,8 +1364,8 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
|
|||
const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
|
||||
const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b);
|
||||
|
||||
#if defined(__ARM_FEATURE_DOTPROD)
|
||||
// dot product into int16x8_t
|
||||
// assume that vdotq_s32 is always available, if not, should check for __ARM_FEATURE_DOTPROD
|
||||
int32x4_t p_0 = vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls);
|
||||
int32x4_t p_1 = vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls);
|
||||
|
||||
|
@ -1374,6 +1379,37 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
|
|||
#else
|
||||
sum0 += d0_0*d1_0*(vgetq_lane_s32(p_0, 0) + vgetq_lane_s32(p_0, 1) + vgetq_lane_s32(p_0, 2) + vgetq_lane_s32(p_0, 3));
|
||||
sum1 += d0_1*d1_1*(vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3));
|
||||
#endif
|
||||
#else
|
||||
const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
|
||||
const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
|
||||
|
||||
const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs));
|
||||
const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs));
|
||||
|
||||
const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls));
|
||||
const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls));
|
||||
|
||||
const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs));
|
||||
const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs));
|
||||
|
||||
const int16x8_t pl_0 = vaddq_s16(pl0l, pl0h);
|
||||
const int16x8_t ph_0 = vaddq_s16(ph0l, ph0h);
|
||||
|
||||
const int16x8_t pl_1 = vaddq_s16(pl1l, pl1h);
|
||||
const int16x8_t ph_1 = vaddq_s16(ph1l, ph1h);
|
||||
|
||||
const int16x8_t p_0 = vaddq_s16(pl_0, ph_0);
|
||||
const int16x8_t p_1 = vaddq_s16(pl_1, ph_1);
|
||||
|
||||
// scalar
|
||||
#if defined(__ARM_FEATURE_QRDMX)
|
||||
sum0 += d0_0*d1_0*vaddvq_s16(p_0);
|
||||
sum1 += d0_1*d1_1*vaddvq_s16(p_1);
|
||||
#else
|
||||
sum0 += d0_0*d1_0*(vgetq_lane_s16(p_0, 0) + vgetq_lane_s16(p_0, 1) + vgetq_lane_s16(p_0, 2) + vgetq_lane_s16(p_0, 3) + vgetq_lane_s16(p_0, 4) + vgetq_lane_s16(p_0, 5) + vgetq_lane_s16(p_0, 6) + vgetq_lane_s16(p_0, 7));
|
||||
sum1 += d0_1*d1_1*(vgetq_lane_s16(p_1, 0) + vgetq_lane_s16(p_1, 1) + vgetq_lane_s16(p_1, 2) + vgetq_lane_s16(p_1, 3) + vgetq_lane_s16(p_1, 4) + vgetq_lane_s16(p_1, 5) + vgetq_lane_s16(p_1, 6) + vgetq_lane_s16(p_1, 7));
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -1553,28 +1589,109 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
|
|||
inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict x, const void * restrict y) {
|
||||
const int nb = n / QK;
|
||||
|
||||
const float * restrict pm0 = (const float *) x;
|
||||
const float * restrict pm1 = (const float *) y;
|
||||
const size_t bs = 2*sizeof(float) + QK/2;
|
||||
|
||||
const float * restrict pd0 = (const float *) (pm0 + nb);
|
||||
const float * restrict pd1 = (const float *) (pm1 + nb);
|
||||
const uint8_t * restrict pd0 = ((const uint8_t *)x + 0*bs);
|
||||
const uint8_t * restrict pd1 = ((const uint8_t *)y + 0*bs);
|
||||
|
||||
const uint8_t * restrict pb0 = (const uint8_t *) (pd0 + nb);
|
||||
const uint8_t * restrict pb1 = (const uint8_t *) (pd1 + nb);
|
||||
const uint8_t * restrict pm0 = ((const uint8_t *)x + 0*bs + sizeof(float));
|
||||
const uint8_t * restrict pm1 = ((const uint8_t *)y + 0*bs + sizeof(float));
|
||||
|
||||
const uint8_t * restrict pb0 = ((const uint8_t *)x + 0*bs + 2*sizeof(float));
|
||||
const uint8_t * restrict pb1 = ((const uint8_t *)y + 0*bs + 2*sizeof(float));
|
||||
|
||||
float sumf = 0.0;
|
||||
|
||||
#if 1
|
||||
#if defined(__AVX2__)
|
||||
#if QK == 32
|
||||
// Initialize accumulator with zeros
|
||||
__m256 acc = _mm256_setzero_ps();
|
||||
// Accumulator for constant offsets
|
||||
float acc_offset = 0.0f;
|
||||
|
||||
// Main loop
|
||||
for (int i = 0; i < nb; ++i) {
|
||||
const float * m0 = (const float *) (pm0 + i*bs);
|
||||
const float * m1 = (const float *) (pm1 + i*bs);
|
||||
|
||||
const float * d0 = (const float *) (pd0 + i*bs);
|
||||
const float * d1 = (const float *) (pd1 + i*bs);
|
||||
|
||||
const uint8_t * restrict p0 = pb0 + i*bs;
|
||||
const uint8_t * restrict p1 = pb1 + i*bs;
|
||||
|
||||
const __m256 d0v = _mm256_broadcast_ss( d0 );
|
||||
const __m256 d1v = _mm256_broadcast_ss( d1 );
|
||||
const __m256 m0v = _mm256_broadcast_ss( m0 );
|
||||
const __m256 m1v = _mm256_broadcast_ss( m1 );
|
||||
|
||||
|
||||
// Compute combined scale for the block
|
||||
const __m256 scale_01 = _mm256_mul_ps( d0v, d1v );
|
||||
|
||||
// Compute cross scales for the block
|
||||
const __m256 scale_0 = _mm256_mul_ps( d0v, m1v );
|
||||
const __m256 scale_1 = _mm256_mul_ps( m0v, d1v );
|
||||
const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0b10101010 );
|
||||
|
||||
// Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
|
||||
__m256i bx = bytesFromNibbles( p0 );
|
||||
__m256i by = bytesFromNibbles( p1 );
|
||||
|
||||
// Now we have a vector with bytes in [ 0 .. 15 ] interval.
|
||||
|
||||
// Sign-extend first 16 signed bytes into int16_t
|
||||
__m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) );
|
||||
__m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) );
|
||||
// Compute products of int16_t integers, add pairwise
|
||||
__m256i i32 = _mm256_madd_epi16( x16, y16 );
|
||||
|
||||
// Sign-extend last 16 signed bytes into int16_t vectors
|
||||
__m256i x16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) );
|
||||
__m256i y16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) );
|
||||
// Accumulate products of int16_t integers
|
||||
i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16_h, y16_h ) );
|
||||
|
||||
// compute sums of unsigned bytes in bx, by in blocks of 8.
|
||||
// This results in a layout like X100 0000 X200 0000 X300 0000 X400 0000,
|
||||
// which we then interleave as X100 Y100 X200 Y200 X300 Y300 X400 Y400.
|
||||
// so if we then cast to 8 singles, we get 8 floats like [ x0_7, y0_7, x8_15, y8_15, x16_23, y16_23, x24_31, y24_31 ]
|
||||
__m256i xsumi = _mm256_sad_epu8( bx, _mm256_setzero_si256() );
|
||||
__m256i ysumi = _mm256_sad_epu8( by, _mm256_setzero_si256() );
|
||||
__m256i sumsi = _mm256_or_si256( xsumi, _mm256_slli_si256( ysumi, 4 ) );
|
||||
__m256 sums = _mm256_cvtepi32_ps( sumsi );
|
||||
|
||||
// Convert int32_t to float
|
||||
__m256 p = _mm256_cvtepi32_ps( i32 );
|
||||
// Apply the scale, and accumulate
|
||||
// acc += d0*d1*x*y + d0*m1*x + d1*m0*y
|
||||
acc = _mm256_fmadd_ps( scale_01, p, acc );
|
||||
acc = _mm256_fmadd_ps( cross_scales, sums, acc );
|
||||
// acc_offset += m0*m1 (for each entry in the block)
|
||||
acc_offset += (*m0)*(*m1);
|
||||
}
|
||||
|
||||
// Return horizontal sum of the acc vector
|
||||
__m128 res = _mm256_extractf128_ps( acc, 1 );
|
||||
res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
|
||||
res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
|
||||
res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
|
||||
|
||||
sumf = _mm_cvtss_f32( res ) + acc_offset * QK;
|
||||
#else
|
||||
#error "not implemented for QK"
|
||||
#endif
|
||||
#else
|
||||
// scalar
|
||||
for (int i = 0; i < nb; i++) {
|
||||
const float m0 = pm0[i];
|
||||
const float m1 = pm1[i];
|
||||
const float m0 = *(const float *) (pm0 + i*bs);
|
||||
const float m1 = *(const float *) (pm1 + i*bs);
|
||||
|
||||
const float d0 = pd0[i];
|
||||
const float d1 = pd1[i];
|
||||
const float d0 = *(const float *) (pd0 + i*bs);
|
||||
const float d1 = *(const float *) (pd1 + i*bs);
|
||||
|
||||
const uint8_t * restrict p0 = pb0 + i*QK/2;
|
||||
const uint8_t * restrict p1 = pb1 + i*QK/2;
|
||||
const uint8_t * restrict p0 = pb0 + i*bs;
|
||||
const uint8_t * restrict p1 = pb1 + i*bs;
|
||||
|
||||
for (int j = 0; j < QK/2; j++) {
|
||||
const uint8_t v0 = p0[j];
|
||||
|
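The AVX2 path in `ggml_vec_dot_q4_1` above relies on expanding the product of two dequantized q4_1 blocks, as the inline comments (`acc += d0*d1*x*y + d0*m1*x + d1*m0*y`, `acc_offset += m0*m1`) hint. Writing the expansion out, with $x_l, y_l \in [0, 15]$ the packed quants of the two blocks and $QK$ the block size:

$$
\sum_{l=0}^{QK-1} (d_0 x_l + m_0)(d_1 y_l + m_1)
= d_0 d_1 \sum_l x_l y_l
+ d_0 m_1 \sum_l x_l
+ d_1 m_0 \sum_l y_l
+ QK \, m_0 m_1
$$

The first term is the `_mm256_madd_epi16` accumulation scaled by `scale_01`, the two cross terms are the per-block byte sums scaled by `cross_scales`, and the last term is what `acc_offset * QK` adds after the horizontal sum.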
@@ -1808,16 +1925,17 @@ inline static void ggml_vec_mad_q4_1(const int n, float * restrict y, void * res
    assert(n % QK == 0);

    const int nb = n / QK;
    const size_t bs = 2*sizeof(float) + QK/2;

    const float * restrict pm = (const float *) (x);
    const float * restrict pd = (const float *) (pm + nb);
    const uint8_t * restrict pb = (const uint8_t *) (pd + nb);
    const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs);
    const uint8_t * restrict pm = ((const uint8_t *)x + 0*bs + sizeof(float));
    const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + 2*sizeof(float));

    for (int i = 0; i < nb; i++) {
        const float m = pm[i];
        const float d = pd[i];
        const float d = *(const float *) (pd + i*bs);
        const float m = *(const float *) (pm + i*bs);

        const uint8_t * restrict pp = pb + i*QK/2;
        const uint8_t * restrict pp = pb + i*bs;

        for (int l = 0; l < QK; l += 2) {
            const uint8_t vi = pp[l/2];

@@ -2038,6 +2156,7 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
    "GELU",
    "SILU",
    "NORM",
    "RMS_NORM",

    "MUL_MAT",

@@ -2058,7 +2177,7 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
    "FLASH_FF",
};

static_assert(GGML_OP_COUNT == 34, "GGML_OP_COUNT != 34");
static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");

static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "none",

@@ -2081,6 +2200,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "gelu(x)",
    "silu(x)",
    "norm(x)",
    "rms_norm(x)",

    "X*Y",

@@ -2101,7 +2221,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "flash_ff(x)",
};

static_assert(GGML_OP_COUNT == 34, "GGML_OP_COUNT != 34");
static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");

//
// ggml object

@@ -3587,6 +3707,39 @@ struct ggml_tensor * ggml_norm_inplace(
    return ggml_norm_impl(ctx, a, true);
}

struct ggml_tensor * ggml_rms_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_RMS_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL; // TODO: maybe store epsilon here?

    return result;
}

struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_rms_norm_impl(ctx, a, false);
}

struct ggml_tensor * ggml_rms_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_rms_norm_impl(ctx, a, true);
}

// ggml_mul_mat

struct ggml_tensor * ggml_mul_mat(
@@ -5375,6 +5528,87 @@ static void ggml_compute_forward_norm(
    }
}

static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const ggml_float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float mean = 0.0;
                for (int i00 = 0; i00 < ne00; i00++) {
                    mean += x[i00] * x[i00];
                }

                mean /= ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));
                // for (int i00 = 0; i00 < ne00; i00++) {
                //     y[i00] = x[i00];
                // }

                const float scale = 1.0/sqrt(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

static void ggml_compute_forward_rms_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}


// ggml_compute_forward_mul_mat

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
|
|||
{
|
||||
ggml_compute_forward_norm(params, tensor->src0, tensor);
|
||||
} break;
|
||||
case GGML_OP_RMS_NORM:
|
||||
{
|
||||
ggml_compute_forward_rms_norm(params, tensor->src0, tensor);
|
||||
} break;
|
||||
case GGML_OP_MUL_MAT:
|
||||
{
|
||||
ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
|
||||
|
@ -8733,6 +8971,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
|
|||
{
|
||||
GGML_ASSERT(false); // TODO: not implemented
|
||||
} break;
|
||||
case GGML_OP_RMS_NORM:
|
||||
{
|
||||
GGML_ASSERT(false); // TODO: not implemented
|
||||
} break;
|
||||
case GGML_OP_MUL_MAT:
|
||||
{
|
||||
if (src0->grad) {
|
||||
|
@ -9076,10 +9318,6 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
|
|||
}
|
||||
|
||||
void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
|
||||
if (cgraph->n_threads <= 0) {
|
||||
cgraph->n_threads = 8;
|
||||
}
|
||||
|
||||
const int n_threads = cgraph->n_threads;
|
||||
|
||||
struct ggml_compute_state_shared state_shared = {
|
||||
|
@ -9159,6 +9397,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
|
|||
node->n_tasks = n_threads;
|
||||
} break;
|
||||
case GGML_OP_NORM:
|
||||
case GGML_OP_RMS_NORM:
|
||||
{
|
||||
node->n_tasks = n_threads;
|
||||
} break;
|
||||
|
|
ggml.h (5 lines changed)

@@ -230,6 +230,7 @@ enum ggml_op {
    GGML_OP_GELU,
    GGML_OP_SILU,
    GGML_OP_NORM, // normalize
    GGML_OP_RMS_NORM,

    GGML_OP_MUL_MAT,

@@ -482,6 +483,10 @@ struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a);

struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a);

// A: m rows, n columns
// B: p rows, n columns (i.e. we transpose it internally)
// result is m columns, p rows
109
main.cpp
109
main.cpp
|
@ -14,6 +14,8 @@
|
|||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
||||
#include <signal.h>
|
||||
#include <unistd.h>
|
||||
#elif defined (_WIN32)
|
||||
#include <signal.h>
|
||||
#endif
|
||||
|
||||
#define ANSI_COLOR_RED "\x1b[31m"
|
||||
|
@ -141,16 +143,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
|
|||
|
||||
// load vocab
|
||||
{
|
||||
const int32_t n_vocab = model.hparams.n_vocab;
|
||||
|
||||
if (n_vocab != model.hparams.n_vocab) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
|
||||
__func__, fname.c_str(), n_vocab, model.hparams.n_vocab);
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string word;
|
||||
for (int i = 0; i < n_vocab; i++) {
|
||||
for (int i = 0; i < model.hparams.n_vocab; i++) {
|
||||
uint32_t len;
|
||||
fin.read((char *) &len, sizeof(len));
|
||||
|
||||
|
@ -182,8 +176,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
|
|||
}
|
||||
}
|
||||
|
||||
const ggml_type wtype2 = GGML_TYPE_F32;
|
||||
|
||||
auto & ctx = model.ctx;
|
||||
|
||||
size_t ctx_size = 0;
|
||||
|
@ -243,7 +235,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
|
|||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
model.layers.resize(n_layer);
|
||||
|
@ -545,8 +536,8 @@ bool llama_eval(
|
|||
const int n_vocab = hparams.n_vocab;
|
||||
const int n_rot = hparams.n_embd/hparams.n_head;
|
||||
|
||||
const int d_key = n_embd/n_head;
|
||||
|
||||
// TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
|
||||
// static size_t buf_size = hparams.n_ctx*1024*1024;
|
||||
static size_t buf_size = 512u*1024*1024;
|
||||
static void * buf = malloc(buf_size);
|
||||
|
||||
|
@ -584,7 +575,7 @@ bool llama_eval(
|
|||
|
||||
// norm
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpL);
|
||||
cur = ggml_rms_norm(ctx0, inpL);
|
||||
|
||||
// cur = attention_norm*cur
|
||||
cur = ggml_mul(ctx0,
|
||||
|
@ -674,7 +665,7 @@ bool llama_eval(
|
|||
{
|
||||
// norm
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpFF);
|
||||
cur = ggml_rms_norm(ctx0, inpFF);
|
||||
|
||||
// cur = ffn_norm*cur
|
||||
cur = ggml_mul(ctx0,
|
||||
|
@ -709,7 +700,7 @@ bool llama_eval(
|
|||
|
||||
// norm
|
||||
{
|
||||
inpL = ggml_norm(ctx0, inpL);
|
||||
inpL = ggml_rms_norm(ctx0, inpL);
|
||||
|
||||
// inpL = norm*inpL
|
||||
inpL = ggml_mul(ctx0,
|
||||
|
@ -753,8 +744,9 @@ bool llama_eval(
|
|||
|
||||
static bool is_interacting = false;
|
||||
|
||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
|
||||
void sigint_handler(int signo) {
|
||||
printf(ANSI_COLOR_RESET);
|
||||
if (signo == SIGINT) {
|
||||
if (!is_interacting) {
|
||||
is_interacting=true;
|
||||
|
@ -796,6 +788,11 @@ int main(int argc, char ** argv) {
|
|||
return 1;
|
||||
}
|
||||
|
||||
if (params.n_ctx > 2048) {
|
||||
fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
|
||||
"expect poor results\n", __func__, params.n_ctx);
|
||||
}
|
||||
|
||||
if (params.seed < 0) {
|
||||
params.seed = time(NULL);
|
||||
}
|
||||
|
@@ -818,8 +815,7 @@ int main(int argc, char ** argv) {
// load the model
{
const int64_t t_start_us = ggml_time_us();

if (!llama_model_load(params.model, model, vocab, 512)) { // TODO: set context from user input ??
if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {
fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
return 1;
}
@@ -841,14 +837,31 @@ int main(int argc, char ** argv) {

std::vector<float> logits;

// Add a space in front of the first character to match OG llama tokenizer behavior
params.prompt.insert(0, 1, ' ');
// tokenize the prompt
std::vector<gpt_vocab::id> embd_inp = ::llama_tokenize(vocab, params.prompt, true);

params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());

// prefix & suffix for instruct mode
const std::vector<gpt_vocab::id> inp_pfx = ::llama_tokenize(vocab, "\n\n### Instruction:\n\n", true);
const std::vector<gpt_vocab::id> inp_sfx = ::llama_tokenize(vocab, "\n\n### Response:\n\n", false);

// in instruct mode, we inject a prefix and a suffix to each input by the user
if (params.instruct) {
params.interactive = true;
params.antiprompt = "### Instruction:\n\n";
}

// tokenize the reverse prompt
std::vector<gpt_vocab::id> antiprompt_inp = ::llama_tokenize(vocab, params.antiprompt, false);

// enable interactive mode if reverse prompt is specified
if (!antiprompt_inp.empty()) {
params.interactive = true;
}

fprintf(stderr, "\n");
fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
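As a rough illustration of the instruct-mode plumbing introduced in this hunk (prefix and suffix token vectors wrapped around each user turn), here is a self-contained sketch. The tokenizer is a fake one-token-per-character stand-in and all names are hypothetical; only the wrapping logic mirrors the diff.

#include <iostream>
#include <string>
#include <vector>

using token_id = int;

// Stand-in for ::llama_tokenize: one "token" per character, just for the demo.
static std::vector<token_id> toy_tokenize(const std::string & text) {
    return std::vector<token_id>(text.begin(), text.end());
}

int main() {
    // Tokenized once up front, like inp_pfx / inp_sfx in the diff.
    const std::vector<token_id> inp_pfx = toy_tokenize("\n\n### Instruction:\n\n");
    const std::vector<token_id> inp_sfx = toy_tokenize("\n\n### Response:\n\n");

    std::vector<token_id> embd_inp = toy_tokenize(" Once upon a time"); // note the leading space

    // Each interactive user turn is queued as prefix + user text + suffix.
    const std::vector<token_id> line_inp = toy_tokenize("Summarize the story.");
    embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
    embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());

    std::cout << "total tokens queued: " << embd_inp.size() << "\n";
    return 0;
}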
@@ -863,11 +876,13 @@ int main(int argc, char ** argv) {
sigemptyset (&sigint_action.sa_mask);
sigint_action.sa_flags = 0;
sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
signal(SIGINT, sigint_handler);
#endif

fprintf(stderr, "%s: interactive mode on.\n", __func__);

if(antiprompt_inp.size()) {
if (antiprompt_inp.size()) {
fprintf(stderr, "%s: reverse prompt: '%s'\n", __func__, params.antiprompt.c_str());
fprintf(stderr, "%s: number of tokens in reverse prompt = %zu\n", __func__, antiprompt_inp.size());
for (int i = 0; i < (int) antiprompt_inp.size(); i++) {
@@ -889,31 +904,27 @@ int main(int argc, char ** argv) {
std::vector<gpt_vocab::id> last_n_tokens(last_n_size);
std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);


if (params.interactive) {
fprintf(stderr, "== Running in interactive mode. ==\n"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
" - Press Ctrl+C to interject at any time.\n"
#endif
" - Press Return to return control to LLaMa.\n"
" - If you want to submit another line, end your input in '\\'.\n");
" - If you want to submit another line, end your input in '\\'.\n\n");
is_interacting = true;
}

int remaining_tokens = params.n_predict;
int input_consumed = 0;
bool input_noecho = false;

// prompt user immediately after the starting prompt has been loaded
if (params.interactive_start) {
is_interacting = true;
}
int remaining_tokens = params.n_predict;

// set the color for the prompt which will be output initially
if (params.use_color) {
printf(ANSI_COLOR_YELLOW);
}

while (remaining_tokens > 0) {
while (remaining_tokens > 0 || params.interactive) {
// predict
if (embd.size() > 0) {
const int64_t t_start_us = ggml_time_us();
@@ -966,13 +977,13 @@ int main(int argc, char ** argv) {
last_n_tokens.erase(last_n_tokens.begin());
last_n_tokens.push_back(embd_inp[input_consumed]);
++input_consumed;
if (embd.size() > params.n_batch) {
if ((int) embd.size() >= params.n_batch) {
break;
}
}

// reset color to default if we there is no pending user input
if (!input_noecho && params.use_color && embd_inp.size() == input_consumed) {
if (!input_noecho && params.use_color && (int) embd_inp.size() == input_consumed) {
printf(ANSI_COLOR_RESET);
}
}
@@ -994,19 +1005,26 @@ int main(int argc, char ** argv) {
is_interacting = true;
}
if (is_interacting) {
if (params.instruct) {
input_consumed = embd_inp.size();
embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());

printf("\n> ");
}

// currently being interactive
bool another_line=true;
bool another_line = true;
while (another_line) {
fflush(stdout);
char buf[256] = {0};
int n_read;
if(params.use_color) printf(ANSI_BOLD ANSI_COLOR_GREEN);
if (params.use_color) printf(ANSI_BOLD ANSI_COLOR_GREEN);
if (scanf("%255[^\n]%n%*c", buf, &n_read) <= 0) {
// presumable empty line, consume the newline
scanf("%*c");
std::ignore = scanf("%*c");
n_read=0;
}
if(params.use_color) printf(ANSI_COLOR_RESET);
if (params.use_color) printf(ANSI_COLOR_RESET);

if (n_read > 0 && buf[n_read-1]=='\\') {
another_line = true;
@@ -1021,6 +1039,10 @@ int main(int argc, char ** argv) {
std::vector<gpt_vocab::id> line_inp = ::llama_tokenize(vocab, buf, false);
embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

if (params.instruct) {
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
}

remaining_tokens -= line_inp.size();

input_noecho = true; // do not echo this again
@@ -1032,11 +1054,18 @@ int main(int argc, char ** argv) {

// end of text token
if (embd.back() == 2) {
fprintf(stderr, " [end of text]\n");
break;
if (params.interactive) {
is_interacting = true;
} else {
fprintf(stderr, " [end of text]\n");
break;
}
}
}

#if defined (_WIN32)
signal(SIGINT, SIG_DFL);
#endif

// report timing
{
@@ -1052,5 +1081,9 @@ int main(int argc, char ** argv) {

ggml_free(model.ctx);

if (params.use_color) {
printf(ANSI_COLOR_RESET);
}

return 0;
}
0
models/.gitignore
vendored
1
prompts/alpaca.txt
Normal file
@@ -0,0 +1 @@
Below is an instruction that describes a task. Write a response that appropriately completes the request.
7
prompts/chat-with-bob.txt
Normal file
@@ -0,0 +1,7 @@
Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.

User: Hello, Bob.
Bob: Hello. How may I help you today?
User: Please tell me the largest city in Europe.
Bob: Sure. The largest city in Europe is Moscow, the capital of Russia.
User:
126
utils.cpp
@@ -16,6 +16,18 @@
#endif

bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
// determine sensible default number of threads.
// std::thread::hardware_concurrency may not be equal to the number of cores, or may return 0.
#ifdef __linux__
std::ifstream cpuinfo("/proc/cpuinfo");
params.n_threads = std::count(std::istream_iterator<std::string>(cpuinfo),
std::istream_iterator<std::string>(),
std::string("processor"));
#endif
if (params.n_threads == 0) {
params.n_threads = std::max(1, (int32_t) std::thread::hardware_concurrency());
}

for (int i = 1; i < argc; i++) {
std::string arg = argv[i];
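The default-thread heuristic added above (count the "processor" entries in /proc/cpuinfo and fall back to hardware_concurrency) can be exercised on its own. A minimal sketch under the assumption of a Linux host follows; the program is illustrative, not part of the repository.

#include <algorithm>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <thread>

int main() {
    int32_t n_threads = 0;
#ifdef __linux__
    // Every logical CPU shows up as a "processor : N" line in /proc/cpuinfo,
    // so counting the word "processor" counts the CPUs.
    std::ifstream cpuinfo("/proc/cpuinfo");
    n_threads = std::count(std::istream_iterator<std::string>(cpuinfo),
                           std::istream_iterator<std::string>(),
                           std::string("processor"));
#endif
    if (n_threads == 0) {
        // hardware_concurrency() may return 0, hence the max() guard.
        n_threads = std::max(1, (int32_t) std::thread::hardware_concurrency());
    }
    std::cout << "default n_threads = " << n_threads << "\n";
    return 0;
}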
@@ -26,17 +38,17 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
} else if (arg == "-p" || arg == "--prompt") {
params.prompt = argv[++i];
} else if (arg == "-f" || arg == "--file") {

std::ifstream file(argv[++i]);

std::copy(std::istreambuf_iterator<char>(file),
std::istreambuf_iterator<char>(),
back_inserter(params.prompt));

std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
if (params.prompt.back() == '\n') {
params.prompt.pop_back();
}
} else if (arg == "-n" || arg == "--n_predict") {
params.n_predict = std::stoi(argv[++i]);
} else if (arg == "--top_k") {
params.top_k = std::stoi(argv[++i]);
} else if (arg == "-c" || arg == "--ctx_size") {
params.n_ctx = std::stoi(argv[++i]);
} else if (arg == "--top_p") {
params.top_p = std::stof(argv[++i]);
} else if (arg == "--temp") {
@@ -51,9 +63,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
params.model = argv[++i];
} else if (arg == "-i" || arg == "--interactive") {
params.interactive = true;
} else if (arg == "--interactive-start") {
params.interactive = true;
params.interactive_start = true;
} else if (arg == "-ins" || arg == "--instruct") {
params.instruct = true;
} else if (arg == "--color") {
params.use_color = true;
} else if (arg == "-r" || arg == "--reverse-prompt") {
@@ -71,13 +82,13 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
return true;
}

void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stderr, "usage: %s [options]\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "options:\n");
fprintf(stderr, " -h, --help show this help message and exit\n");
fprintf(stderr, " -i, --interactive run in interactive mode\n");
fprintf(stderr, " --interactive-start run in interactive mode and poll user input at startup\n");
fprintf(stderr, " -ins, --instruct run in instruction mode (use with Alpaca models)\n");
fprintf(stderr, " -r PROMPT, --reverse-prompt PROMPT\n");
fprintf(stderr, " in interactive mode, poll user input upon seeing PROMPT\n");
fprintf(stderr, " --color colorise output to distinguish prompt and user input from generations\n");
@@ -92,6 +103,7 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", params.top_p);
fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", params.repeat_penalty);
fprintf(stderr, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx);
fprintf(stderr, " --temp N temperature (default: %.1f)\n", params.temp);
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
fprintf(stderr, " -m FNAME, --model FNAME\n");
@@ -272,40 +284,56 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
return tokens;
}

// TODO: Calculate this constant from the vocabulary
#define MAX_TOKEN_LEN 18
// SentencePiece implementation after https://guillaume-be.github.io/2020-05-30/sentence_piece
std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, const std::string & text, bool bos) {
//auto res = gpt_tokenize(vocab, text);

//if (bos) {
// res.insert(res.begin(), 1); // TODO: replace with vocab.bos
//}

std::vector<gpt_vocab::id> res;
std::vector<int> score;
std::vector<gpt_vocab::id> prev;
int len = text.length();

score.resize(len + 1);
prev.resize(len + 1);

// Forward pass
for (int i = 0; i < len; i++) {
int max_len = std::min(len - i, MAX_TOKEN_LEN);
for (int sub_len = 1; sub_len <= max_len; sub_len++) {
auto sub = text.substr(i, sub_len);
auto token = vocab.token_to_id.find(sub);
if (token != vocab.token_to_id.end()) {
int token_score = sub.length() * sub.length();
int local_score = score[i] + token_score;
int next = i + sub_len;
if (score[next] < local_score) {
score[next] = local_score;
prev[next] = (*token).second;
}
}
}
}

// Backward pass
int i = len;
while (i > 0) {
gpt_vocab::id token_id = prev[i];
if (token_id == 0) {
// TODO: Return error or something more meaningful
printf("failed to tokenize string!\n");
break;
}
res.push_back(token_id);
auto token = (*vocab.id_to_token.find(token_id)).second;
i -= token.length();
}

if (bos) {
res.push_back(1); // TODO: replace with vocab.bos
}

//find the longest token that matches the text
int pos = 0;
while (true) {
int l = 0;
int t = 0;
for (const auto & kv : vocab.id_to_token) {
if (kv.second.size() < l) continue;
if (kv.second.size() > text.size() - pos) continue;
if (text.substr(pos, kv.second.size()) == kv.second) {
l = kv.second.size();
t = kv.first;
}
}

if (l == 0) {
break;
}

res.push_back(t);
pos += l;
}
// Pieces are in reverse order so correct that
std::reverse(res.begin(), res.end());

return res;
}
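The new llama_tokenize above replaces greedy longest-match with a small dynamic program: a forward pass scores every reachable text position by the best sum of squared token lengths, and a backward pass walks the prev[] links and reverses the result. The sketch below reimplements that idea over a hand-made toy vocabulary; the function and variable names are hypothetical and it does not use the repository's gpt_vocab type.

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy version of the forward/backward tokenizer from the diff.
static std::vector<int> dp_tokenize(const std::map<std::string, int> & token_to_id,
                                    const std::vector<std::string> & id_to_token,
                                    const std::string & text,
                                    int max_token_len) {
    const int len = (int) text.length();
    std::vector<int> score(len + 1, 0);
    std::vector<int> prev(len + 1, 0);

    // Forward pass: longer matching pieces win because of the squared-length score.
    for (int i = 0; i < len; i++) {
        const int max_len = std::min(len - i, max_token_len);
        for (int sub_len = 1; sub_len <= max_len; sub_len++) {
            const auto it = token_to_id.find(text.substr(i, sub_len));
            if (it == token_to_id.end()) continue;
            const int local_score = score[i] + sub_len * sub_len;
            if (score[i + sub_len] < local_score) {
                score[i + sub_len] = local_score;
                prev[i + sub_len]  = it->second;
            }
        }
    }

    // Backward pass: follow prev[] from the end of the text, then reverse.
    std::vector<int> res;
    int i = len;
    while (i > 0) {
        const int id = prev[i];
        if (id == 0) break; // untokenizable remainder
        res.push_back(id);
        i -= (int) id_to_token[id].length();
    }
    std::reverse(res.begin(), res.end());
    return res;
}

int main() {
    // id 0 is reserved as "no token", matching the diff's use of prev == 0 as failure.
    const std::vector<std::string> id_to_token = {"", "h", "e", "l", "o", "he", "ll", "hell", "hello"};
    std::map<std::string, int> token_to_id;
    for (int id = 1; id < (int) id_to_token.size(); id++) token_to_id[id_to_token[id]] = id;

    for (int id : dp_tokenize(token_to_id, id_to_token, "hello", 18)) {
        std::cout << id << " -> '" << id_to_token[id] << "'\n"; // prints the single piece "hello"
    }
    return 0;
}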
@@ -367,7 +395,7 @@ gpt_vocab::id llama_sample_top_p_top_k(
logits_id.push_back(std::make_pair(logits[i]*scale*repeat_penalty, i));
} else {
logits_id.push_back(std::make_pair(logits[i]*scale/repeat_penalty, i));
}
}
} else {
logits_id.push_back(std::make_pair(logits[i]*scale, i));
}
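In the repeat-penalty branch shown above, the sign of the logit decides whether the penalty multiplies or divides, so that a recently seen token is always made less likely. A standalone sketch of just that rule (the helper is hypothetical, not the repository's sampler):

#include <cstdio>

// A token seen recently gets its logit pushed toward "less likely",
// regardless of whether the logit is positive or negative.
static float apply_repeat_penalty(float logit, bool recently_seen, float repeat_penalty) {
    if (!recently_seen) return logit;
    return logit > 0.0f ? logit / repeat_penalty : logit * repeat_penalty;
}

int main() {
    const float penalty = 1.3f;
    std::printf("%.2f -> %.2f\n",  2.0f, apply_repeat_penalty( 2.0f, true, penalty)); //  2.00 -> 1.54
    std::printf("%.2f -> %.2f\n", -2.0f, apply_repeat_penalty(-2.0f, true, penalty)); // -2.00 -> -2.60
    return 0;
}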
@@ -486,7 +514,8 @@ size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t

size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t * hist) {
const int nb = k / qk;
const size_t row_size = nb*(2*sizeof(float) + sizeof(uint8_t)*qk/2);
const size_t bs = (2*sizeof(float) + sizeof(uint8_t)*qk/2);
const size_t row_size = nb*bs;

assert(k % qk == 0);
@@ -495,10 +524,10 @@ size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t

char * pdst = (char *) dst;

for (int j = 0; j < n; j += k) {
float * pm = (float *) (pdst + (j/k)*row_size);
float * pd = (float *) (pm + nb);
uint8_t * pb = (uint8_t *) (pd + nb);
for (int j = 0; j < n; j += k) {
uint8_t * pd = (uint8_t *) (pdst + (j/k)*row_size + 0*bs);
uint8_t * pm = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + sizeof(float));
uint8_t * pb = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + 2*sizeof(float));

//printf("n = %d, k = %d, nb = %d, row_size = %d, j = %d, pm = %p, pd = %p, pb = %p\n", n, k, nb, row_size, j, pm, pd, pb);
@@ -516,8 +545,10 @@ size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t
const float d = (max - min) / ((1 << 4) - 1);
const float id = d ? 1.0f/d : 0.0f;

pm[i] = min;
pd[i] = d;
*(float *) pd = d;
*(float *) pm = min;
pd += bs;
pm += bs;

for (int l = 0; l < qk; l += 2) {
const float v0 = (src[j + i*qk + l + 0] - min)*id;
@@ -535,7 +566,8 @@ size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t
pp[l/2] = vi0 | (vi1 << 4);
}

memcpy(pb + i*qk/2, pp, pp_size);
memcpy(pb, pp, pp_size);
pb += bs;
}
}
}
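The ggml_quantize_q4_1 change above moves from three planar arrays per row (all minimums, then all deltas, then all packed nibbles) to interleaved blocks of size bs = 2*sizeof(float) + qk/2 bytes: delta, minimum, then qk/2 packed 4-bit quants per block. A small sketch of that per-block layout for qk = 32 follows; the struct is illustrative only and not ggml's actual type definition.

#include <cstdint>
#include <cstdio>

constexpr int QK = 32;

// One q4_1 block as laid out by the new code: delta, then min, then 16 packed nibbles.
struct block_q4_1 {
    float   d;          // scaling factor (delta)
    float   m;          // minimum value of the block
    uint8_t qs[QK / 2]; // two 4-bit quants per byte
};

int main() {
    std::printf("block size = %zu bytes for %d weights\n", sizeof(block_q4_1), QK);
    std::printf("row of 4096 weights = %zu bytes\n", (4096 / QK) * sizeof(block_q4_1));
    return 0;
}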
9
utils.h
@@ -17,6 +17,7 @@ struct gpt_params {
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
int32_t n_predict = 128; // new tokens to predict
int32_t repeat_last_n = 64; // last n tokens to penalize
int32_t n_ctx = 512; //context size

// sampling parameters
int32_t top_k = 40;
@@ -26,14 +27,14 @@ struct gpt_params {

int32_t n_batch = 8; // batch size for prompt processing

std::string model = "models/lamma-7B/ggml-model.bin"; // model path
std::string prompt;
std::string model = "models/lamma-7B/ggml-model.bin"; // model path
std::string prompt = "";
std::string antiprompt = ""; // string upon seeing which more user input is prompted

bool use_color = false; // use color to distinguish generations and inputs

bool interactive = false; // interactive mode
bool interactive_start = false; // reverse prompt immediately
std::string antiprompt = ""; // string upon seeing which more user input is prompted
bool instruct = false; // instruction mode (used for Alpaca models)
};

bool gpt_params_parse(int argc, char ** argv, gpt_params & params);