Merge branch 'ggerganov:master' into master

commit 4e3b9f2f9c
28 changed files with 1685 additions and 1102 deletions

.flake8 (new file, +2)
@@ -0,0 +1,2 @@
[flake8]
max-line-length = 125
.gitignore (vendored, +2)

@@ -34,8 +34,10 @@ models/*
/perplexity
/embedding
/train-text-from-scratch
/simple
/benchmark-matmult
/vdot
/server
/Pipfile
/libllama.so
.pre-commit-config.yaml (new file, +15)

@@ -0,0 +1,15 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
exclude: prompts/.*.txt
repos:
-   repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
    -   id: trailing-whitespace
    -   id: end-of-file-fixer
    -   id: check-yaml
    -   id: check-added-large-files
-   repo: https://github.com/PyCQA/flake8
    rev: 6.0.0
    hooks:
    -   id: flake8
@@ -461,8 +461,10 @@ target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
target_compile_features(ggml PUBLIC c_std_11) # don't bump
target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})

add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
if (BUILD_SHARED_LIBS)
    set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
    add_library(ggml_shared SHARED $<TARGET_OBJECTS:ggml>)
endif()

add_library(llama

@@ -490,6 +492,10 @@ if (GGML_SOURCES_CUDA)
    message(STATUS "GGML CUDA sources found, configuring CUDA architecture")
    set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES OFF)
    set_property(TARGET ggml PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")

    set_property(TARGET ggml_static PROPERTY CUDA_ARCHITECTURES OFF)
    set_property(TARGET ggml_static PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")

    set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF)
endif()
Makefile (11 changes)

@@ -3,6 +3,8 @@ BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-tex

ifdef LLAMA_BUILD_SERVER
	BUILD_TARGETS += server
	LLAMA_SERVER_VERBOSE ?= 1
	server: private CXXFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
endif

default: $(BUILD_TARGETS)

@@ -142,11 +144,7 @@ endif # LLAMA_NO_ACCELERATE

ifdef LLAMA_OPENBLAS
	CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas -I/usr/include/openblas
	ifneq ($(shell grep -e "Arch Linux" -e "ID_LIKE=arch" /etc/os-release 2>/dev/null),)
		LDFLAGS += -lopenblas -lcblas
	else
		LDFLAGS += -lopenblas
	endif
endif # LLAMA_OPENBLAS

ifdef LLAMA_BLIS

@@ -254,7 +252,7 @@ $(info )
ggml.o: ggml.c ggml.h ggml-cuda.h
	$(CC) $(CFLAGS) -c $< -o $@

llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama-util.h
llama.o: llama.cpp ggml.h ggml-cuda.h ggml-metal.h llama.h llama-util.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

common.o: examples/common.cpp examples/common.h

@@ -278,9 +276,6 @@ main: examples/main/main.cpp build-info.h ggml.

simple: examples/simple/simple.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	@echo
	@echo '==== Run ./simple -h for help. ===='
	@echo

quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
README.md (49 changes)

@@ -336,7 +336,6 @@ Building the program with BLAS support may lead to some performance improvements
  cmake .. -DLLAMA_CUBLAS=ON
  cmake --build . --config Release
  ```
  Note: Because llama.cpp uses multiple CUDA streams for matrix multiplication, results [are not guaranteed to be reproducible](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility). If you need reproducibility, set `GGML_CUDA_MAX_STREAMS` in the file `ggml-cuda.cu` to 1.

  The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used.
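  For example (a hypothetical invocation; the model path and prompt are placeholders), restricting llama.cpp to the first GPU looks like this:

  ```bash
  # Only GPU 0 is visible to the process, so all offloaded CUDA work runs on that device
  CUDA_VISIBLE_DEVICES=0 ./main -m models/7B/ggml-model.bin -p "Building a website can be done in 10 simple steps:"
  ```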
@@ -616,8 +615,14 @@ And after 4.45 hours, you will have the final perplexity.

### Android

#### Building the Project using Android NDK
You can easily run `llama.cpp` on an Android device with [termux](https://termux.dev/).
First, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake:

First, install the essential packages for termux:
```
pkg install clang wget git cmake
```
Second, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake:
```
$ mkdir build-android
$ cd build-android

@@ -630,6 +635,46 @@ Finally, copy the `llama` binary and the model files to your device storage. Her

https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4

#### Building the Project using Termux (F-Droid)
Termux from F-Droid offers an alternative way to run the project on an Android device. With this method you build directly in the terminal, with no need for a rooted device or an SD card.

Below are instructions for building the project with OpenBLAS and CLBlast. This combination is designed to deliver peak performance on recent devices that feature a GPU.

If you opt to use OpenBLAS, install the corresponding package:
```
apt install libopenblas
```

If you also want to use CLBlast, first install the required OpenCL packages:
```
apt install ocl-icd opencl-headers opencl-clhpp clinfo
```

To compile CLBlast, clone its repository (https://github.com/CNugteren/CLBlast) and also clone this repository into your home directory. Then navigate to the CLBlast folder and run:
```
cmake .
make
cp libclblast.so* $PREFIX/lib
cp ./include/clblast.h ../llama.cpp
```

Next, navigate to the llama.cpp directory. To compile it with OpenBLAS and CLBlast, run:
```
cp /data/data/com.termux/files/usr/include/openblas/cblas.h .
cp /data/data/com.termux/files/usr/include/openblas/openblas_config.h .
make LLAMA_CLBLAST=1 # sometimes you need to run this command twice
```

Once these steps are complete, the project is compiled. To run it with CLBlast, one small adjustment is needed: direct the operations to your device's physical GPU rather than the virtual one:
```
GGML_OPENCL_PLATFORM=0
GGML_OPENCL_DEVICE=0
export LD_LIBRARY_PATH=/system/vendor/lib64:$LD_LIBRARY_PATH
./main (...)
```

For easy and swift re-execution, consider putting this final part in a .sh script file; a sketch of such a script follows below.
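For example, a minimal wrapper script in that spirit (a sketch, not part of the repository; it exports the GGML_* variables so they reach the child process, and the `./main` arguments are left for you to supply):

```bash
#!/bin/bash
# Point CLBlast at the physical GPU and expose the vendor OpenCL driver
export GGML_OPENCL_PLATFORM=0
export GGML_OPENCL_DEVICE=0
export LD_LIBRARY_PATH=/system/vendor/lib64:$LD_LIBRARY_PATH

# Forward whatever arguments you normally pass to ./main (model, prompt, ...)
./main "$@"
```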
### Docker

#### Prerequisites
22
convert.py
22
convert.py
|
@ -512,7 +512,11 @@ class LazyTensor:
|
|||
if not isinstance(self.data_type, QuantizedDataType):
|
||||
raise Exception(f"Can't turn an unquantized tensor into a quantized type ({data_type})")
|
||||
if self.data_type.have_g_idx:
|
||||
sys.stderr.write("Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), which is not yet natively supported by GGML. For now you can still convert this model by passing `--outtype f16` to dequantize, but that will result in a much larger output file for no quality benefit.\n")
|
||||
sys.stderr.write(
|
||||
"Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), "
|
||||
"which is not yet natively supported by GGML. "
|
||||
"For now you can still convert this model by passing `--outtype f16` to dequantize, "
|
||||
"but that will result in a much larger output file for no quality benefit.\n")
|
||||
sys.exit(1)
|
||||
assert not data_type.have_g_idx and self.data_type.have_addends and data_type.have_addends
|
||||
|
||||
|
@ -695,7 +699,8 @@ class LazyUnpickler(pickle.Unpickler):
|
|||
return LazyStorage(load=load, kind=pid[1], description=description)
|
||||
|
||||
# @staticmethod
|
||||
def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any, # pyright: ignore[reportSelfClsParameterName]
|
||||
def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any,
|
||||
# pyright: ignore[reportSelfClsParameterName]
|
||||
requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
|
||||
assert isinstance(storage, LazyStorage)
|
||||
|
||||
|
@ -1054,7 +1059,7 @@ def load_some_model(path: Path) -> ModelPlus:
|
|||
files = list(path.glob("model-00001-of-*.safetensors"))
|
||||
if not files:
|
||||
# Try the PyTorch patterns too, with lower priority
|
||||
globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin" ]
|
||||
globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
|
||||
files = [file for glob in globs for file in path.glob(glob)]
|
||||
if not files:
|
||||
# Try GGML too, but with lower priority, since if both a non-GGML
|
||||
|
@ -1094,7 +1099,9 @@ def load_vocab(path: Path) -> SentencePieceVocab:
|
|||
elif path3.exists():
|
||||
path = path3
|
||||
else:
|
||||
raise FileNotFoundError(f"Could not find tokenizer.model in {path} or its parent; if it's in another directory, pass the directory as --vocab-dir")
|
||||
raise FileNotFoundError(
|
||||
f"Could not find tokenizer.model in {path} or its parent; "
|
||||
"if it's in another directory, pass the directory as --vocab-dir")
|
||||
added_tokens_path = path.parent / "added_tokens.json"
|
||||
print(f"Loading vocab file {path}")
|
||||
return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None)
|
||||
|
@ -1110,7 +1117,9 @@ def default_outfile(model_paths: List[Path], params: Params) -> Path:
|
|||
}[params.file_type]
|
||||
ret = model_paths[0].parent / f"ggml-model-{namestr}.bin"
|
||||
if ret in model_paths:
|
||||
sys.stderr.write(f"Error: Default output path ({ret}) would overwrite the input. Please explicitly specify a path using --outfile.\n")
|
||||
sys.stderr.write(
|
||||
f"Error: Default output path ({ret}) would overwrite the input. "
|
||||
"Please explicitly specify a path using --outfile.\n")
|
||||
sys.exit(1)
|
||||
return ret
|
||||
|
||||
|
@ -1131,7 +1140,8 @@ def main(args_in: Optional[List[str]] = None) -> None:
|
|||
parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_0"], help="output format (default: based on input)")
|
||||
parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
|
||||
parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
|
||||
parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
|
||||
parser.add_argument("model", type=Path,
|
||||
help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
|
||||
args = parser.parse_args(args_in)
|
||||
|
||||
vocab: Vocab
|
||||
|
|
|
@ -38,6 +38,7 @@ else()
|
|||
add_subdirectory(benchmark)
|
||||
add_subdirectory(baby-llama)
|
||||
add_subdirectory(train-text-from-scratch)
|
||||
add_subdirectory(simple)
|
||||
if (LLAMA_METAL)
|
||||
add_subdirectory(metal)
|
||||
endif()
|
||||
|
|
|
@ -106,9 +106,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
|
|||
}
|
||||
|
||||
if (arg == "-s" || arg == "--seed") {
|
||||
#if defined(GGML_USE_CUBLAS)
|
||||
fprintf(stderr, "WARNING: when using cuBLAS generation results are NOT guaranteed to be reproducible.\n");
|
||||
#endif
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import sys, os
|
||||
import os
|
||||
import csv
|
||||
|
||||
labels = []
|
||||
|
@ -8,6 +8,7 @@ numEntries = 1
|
|||
|
||||
rows = []
|
||||
|
||||
|
||||
def bar_chart(numbers, labels, pos):
|
||||
plt.bar(pos, numbers, color='blue')
|
||||
plt.xticks(ticks=pos, labels=labels)
|
||||
|
@ -16,6 +17,7 @@ def bar_chart(numbers, labels, pos):
|
|||
plt.ylabel("Questions Correct")
|
||||
plt.show()
|
||||
|
||||
|
||||
def calculatecorrect():
|
||||
directory = os.fsencode("./examples/jeopardy/results/")
|
||||
csv_reader = csv.reader(open("./examples/jeopardy/qasheet.csv", 'rt'), delimiter=',')
|
||||
|
@ -38,14 +40,13 @@ def calculatecorrect():
|
|||
print(line)
|
||||
else:
|
||||
print("Correct answer: " + rows[i][2] + "\n")
|
||||
i+=1
|
||||
i += 1
|
||||
print("Did the AI get the question right? (y/n)")
|
||||
if input() == "y":
|
||||
totalcorrect += 1
|
||||
numbers.append(totalcorrect)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
calculatecorrect()
|
||||
pos = list(range(numEntries))
|
||||
|
|
|
@ -354,7 +354,7 @@ int main(int argc, char ** argv) {
|
|||
if ((int)embd.size() > max_embd_size) {
|
||||
auto skipped_tokens = embd.size() - max_embd_size;
|
||||
console_set_color(con_st, CONSOLE_COLOR_ERROR);
|
||||
printf("<<input too long: skipped %" PRIu64 " token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
|
||||
printf("<<input too long: skipped %zu token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
|
||||
console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
|
||||
fflush(stdout);
|
||||
embd.resize(max_embd_size);
|
||||
|
|
|
@ -40,8 +40,10 @@ int main(int argc, char ** argv) {
|
|||
// this allocates all Metal resources and memory buffers
|
||||
auto * ctx_metal = ggml_metal_init();
|
||||
|
||||
ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data));
|
||||
ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval));
|
||||
const size_t max_size_data = ggml_get_max_tensor_size(ctx_data);
|
||||
const size_t max_size_eval = ggml_get_max_tensor_size(ctx_eval);
|
||||
ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data), max_size_data);
|
||||
ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval), max_size_eval);
|
||||
|
||||
// main
|
||||
{
|
||||
|
|
|
@ -1,6 +1,10 @@
|
|||
set(TARGET server)
|
||||
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
add_executable(${TARGET} server.cpp json.hpp httplib.h)
|
||||
target_compile_definitions(${TARGET} PRIVATE
|
||||
SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
|
||||
)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
if(TARGET BUILD_INFO)
|
||||
|
|
|
@ -1,37 +1,74 @@
|
|||
# llama.cpp/example/server
|
||||
|
||||
This example lets you run a llama.cpp HTTP server that you can interact with from a web page, or whose API you can consume directly.
|
||||
This example demonstrates a simple HTTP API server to interact with llama.cpp.
|
||||
|
||||
## Table of Contents
|
||||
Command line options:
|
||||
|
||||
1. [Quick Start](#quick-start)
|
||||
2. [Node JS Test](#node-js-test)
|
||||
3. [API Endpoints](#api-endpoints)
|
||||
4. [More examples](#more-examples)
|
||||
5. [Common Options](#common-options)
|
||||
6. [Performance Tuning and Memory Options](#performance-tuning-and-memory-options)
|
||||
- `--threads N`, `-t N`: Set the number of threads to use during computation.
|
||||
- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
|
||||
- `-a ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
|
||||
- `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
|
||||
- `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
|
||||
- `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. Requires cuBLAS.
|
||||
- `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance. Requires cuBLAS.
|
||||
- `-lv, --low-vram`: Do not allocate a VRAM scratch buffer for holding temporary results. Reduces VRAM usage at the cost of performance, particularly prompt processing speed. Requires cuBLAS.
|
||||
- `-b N`, `--batch-size N`: Set the batch size for prompt processing. Default: `512`.
|
||||
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
|
||||
- `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
|
||||
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
|
||||
- `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
|
||||
- `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
|
||||
- `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
|
||||
- `--host`: Set the hostname or IP address to listen on. Default: `127.0.0.1`.
- `--port`: Set the port to listen on. Default: `8080`.
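As an illustration, here is a hypothetical invocation combining several of these options (the layer count and model path are placeholders to adapt to your setup):

```bash
# Offload 32 layers to the GPU, use a 2048-token context, and listen on all interfaces
./server -m models/7B/ggml-model.bin -c 2048 -ngl 32 --host 0.0.0.0 --port 8080
```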
|
||||
|
||||
## Build
|
||||
|
||||
Build llama.cpp with server from repository root with either make or CMake.
|
||||
|
||||
- Using `make`:
|
||||
|
||||
```bash
|
||||
LLAMA_BUILD_SERVER=1 make
|
||||
```
|
||||
|
||||
- Using `CMake`:
|
||||
|
||||
```bash
|
||||
mkdir build-server
|
||||
cd build-server
|
||||
cmake -DLLAMA_BUILD_SERVER=ON ..
|
||||
cmake --build . --config Release
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
To get started right away, run the following command, making sure to use the correct path for the model you have:
|
||||
|
||||
#### Unix-based systems (Linux, macOS, etc.):
|
||||
Make sure to build with the server option on
|
||||
```bash
|
||||
LLAMA_BUILD_SERVER=1 make
|
||||
```
|
||||
### Unix-based systems (Linux, macOS, etc.):
|
||||
|
||||
```bash
|
||||
./server -m models/7B/ggml-model.bin --ctx_size 2048
|
||||
./server -m models/7B/ggml-model.bin -c 2048
|
||||
```
|
||||
|
||||
#### Windows:
|
||||
### Windows:
|
||||
|
||||
```powershell
|
||||
server.exe -m models\7B\ggml-model.bin --ctx_size 2048
|
||||
server.exe -m models\7B\ggml-model.bin -c 2048
|
||||
```
|
||||
|
||||
That will start a server that by default listens on `127.0.0.1:8080`. You can consume the endpoints with Postman or NodeJS with axios library.
|
||||
The above command will start a server that by default listens on `127.0.0.1:8080`.
|
||||
You can consume the endpoints with Postman, or from Node.js with the axios library.
|
||||
|
||||
## Testing with CURL
|
||||
|
||||
Using [curl](https://curl.se/). On Windows `curl.exe` should be available in the base OS.
|
||||
|
||||
```sh
|
||||
curl --request POST \
|
||||
--url http://localhost:8080/completion \
|
||||
--data '{"prompt": "Building a website can be done in 10 simple steps:","n_predict": 128}'
|
||||
```
|
||||
|
||||
## Node JS Test
|
||||
|
||||
|
@ -54,7 +91,6 @@ const prompt = `Building a website can be done in 10 simple steps:`;
|
|||
async function Test() {
|
||||
let result = await axios.post("http://127.0.0.1:8080/completion", {
|
||||
prompt,
|
||||
batch_size: 128,
|
||||
n_predict: 512,
|
||||
});
|
||||
|
||||
|
@ -73,247 +109,75 @@ node .
|
|||
|
||||
## API Endpoints
|
||||
|
||||
You can interact with these API endpoints. This implementation only supports chat-style interaction.
|
||||
- **POST** `/completion`: Given a prompt, it returns the predicted completion.
|
||||
|
||||
- **POST** `hostname:port/completion`: Set up the LLaMA context and begin the completion task.
|
||||
*Options:*
|
||||
|
||||
*Options:*
|
||||
`temperature`: Adjust the randomness of the generated text (default: 0.8).
|
||||
|
||||
`batch_size`: Set the batch size for prompt processing (default: 512).
|
||||
`top_k`: Limit the next token selection to the K most probable tokens (default: 40).
|
||||
|
||||
`temperature`: Adjust the randomness of the generated text (default: 0.8).
|
||||
`top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P (default: 0.9).
|
||||
|
||||
`top_k`: Limit the next token selection to the K most probable tokens (default: 40).
|
||||
`n_predict`: Set the number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. (default: 128, -1 = infinity).
|
||||
|
||||
`top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P (default: 0.9).
|
||||
`n_keep`: Specify the number of tokens from the initial prompt to retain when the model resets its internal context.
|
||||
By default, this value is set to 0 (meaning no tokens are kept). Use `-1` to retain all tokens from the initial prompt.
|
||||
|
||||
`n_predict`: Set the number of tokens to predict when generating text (default: 128, -1 = infinity).
|
||||
`stream`: It allows receiving each predicted token in real-time instead of waiting for the completion to finish. To enable this, set to `true`.
|
||||
|
||||
`threads`: Set the number of threads to use during computation.
|
||||
`prompt`: Provide a prompt. Internally, the prompt is compared against the previous one; any part that has already been evaluated is reused, and only the remaining part is evaluated.
|
||||
|
||||
`n_keep`: Specify the number of tokens from the initial prompt to retain when the model resets its internal context. By default, this value is set to 0 (meaning no tokens are kept). Use `-1` to retain all tokens from the initial prompt.
|
||||
`stop`: Specify a JSON array of stopping strings.
|
||||
These words will not be included in the completion, so make sure to add them to the prompt for the next iteration (default: []).
|
||||
|
||||
`as_loop`: It allows receiving each predicted token in real-time instead of waiting for the completion to finish. To enable this, set to `true`.
|
||||
`tfs_z`: Enable tail free sampling with parameter z (default: 1.0, 1.0 = disabled).
|
||||
|
||||
`interactive`: It allows interacting with the completion, and the completion stops as soon as it encounters a `stop word`. To enable this, set to `true`.
|
||||
`typical_p`: Enable locally typical sampling with parameter p (default: 1.0, 1.0 = disabled).
|
||||
|
||||
`prompt`: Provide a prompt. Internally, the prompt is compared against the previous one; any part that has already been evaluated is reused, and only the remaining part is evaluated.
|
||||
`repeat_penalty`: Control the repetition of token sequences in the generated text (default: 1.1).
|
||||
|
||||
`stop`: Specify the words or characters that indicate a stop. These words will not be included in the completion, so make sure to add them to the prompt for the next iteration.
|
||||
`repeat_last_n`: Last n tokens to consider for penalizing repetition (default: 64, 0 = disabled, -1 = ctx-size).
|
||||
|
||||
`exclude`: Specify the words or characters you do not want to appear in the completion. These words will not be included in the completion, so make sure to add them to the prompt for the next iteration.
|
||||
`penalize_nl`: Penalize newline tokens when applying the repeat penalty (default: true).
|
||||
|
||||
- **POST** `hostname:port/embedding`: Generate embedding of a given text
|
||||
`presence_penalty`: Repeat alpha presence penalty (default: 0.0, 0.0 = disabled).
|
||||
|
||||
*Options:*
|
||||
`frequency_penalty`: Repeat alpha frequency penalty (default: 0.0, 0.0 = disabled).
|
||||
|
||||
`content`: Set the text to generate the embedding for.
|
||||
`mirostat`: Enable Mirostat sampling, controlling perplexity during text generation (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0).
|
||||
|
||||
`threads`: Set the number of threads to use during computation.
|
||||
`mirostat_tau`: Set the Mirostat target entropy, parameter tau (default: 5.0).
|
||||
|
||||
To use this endpoint, you need to start the server with the `--embedding` option added.
|
||||
`mirostat_eta`: Set the Mirostat learning rate, parameter eta (default: 0.1).
|
||||
|
||||
- **POST** `hostname:port/tokenize`: Tokenize a given text
|
||||
`seed`: Set the random number generator (RNG) seed (default: -1, < 0 = random seed).
|
||||
|
||||
*Options:*
|
||||
`ignore_eos`: Ignore end of stream token and continue generating (default: false).
|
||||
|
||||
`content`: Set the text to tokenize.
|
||||
`logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced (default: []).
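As a sketch, several of these options can be combined in a single request (a hypothetical example; the parameter values are arbitrary):

```bash
# Low-temperature completion that stops at "\n### Human:" and never emits token 15043 ("Hello")
curl --request POST \
    --url http://localhost:8080/completion \
    --data '{
        "prompt": "Building a website can be done in 10 simple steps:",
        "n_predict": 64,
        "temperature": 0.2,
        "top_k": 40,
        "top_p": 0.9,
        "stop": ["\n### Human:"],
        "logit_bias": [[15043, false]]
    }'
```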
|
||||
|
||||
- **GET** `hostname:port/next-token`: Receive the next predicted token; execute this request in a loop. Make sure to set `as_loop` to `true` in the completion request.
|
||||
- **POST** `/tokenize`: Tokenize a given text.
|
||||
|
||||
*Options:*
|
||||
*Options:*
|
||||
|
||||
`stop`: Set `hostname:port/next-token?stop=true` to stop the token generation.
|
||||
`content`: Set the text to tokenize.
|
||||
|
||||
## More examples
|
||||
|
||||
### Interactive mode
|
||||
|
||||
This mode allows interacting in a chat-like manner. It is recommended for models designed as assistants such as `Vicuna`, `WizardLM`, `Koala`, among others. Make sure to add the correct stop word for the corresponding model.
|
||||
Check the sample in [chat.mjs](chat.mjs).
|
||||
Run with NodeJS version 16 or later:
|
||||
|
||||
The prompt should be generated by you, according to the model's guidelines. You should keep adding the model's completions to the context as well.
|
||||
|
||||
This example works well for `Vicuna - version 1`.
|
||||
|
||||
```javascript
|
||||
const axios = require("axios");
|
||||
|
||||
let prompt = `A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
|
||||
### Human: Hello, Assistant.
|
||||
### Assistant: Hello. How may I help you today?
|
||||
### Human: Please tell me the largest city in Europe.
|
||||
### Assistant: Sure. The largest city in Europe is Moscow, the capital of Russia.`;
|
||||
|
||||
async function ChatCompletion(answer) {
|
||||
// the user's next question to the prompt
|
||||
prompt += `\n### Human: ${answer}\n`
|
||||
|
||||
result = await axios.post("http://127.0.0.1:8080/completion", {
|
||||
prompt,
|
||||
batch_size: 128,
|
||||
temperature: 0.2,
|
||||
top_k: 40,
|
||||
top_p: 0.9,
|
||||
n_keep: -1,
|
||||
n_predict: 2048,
|
||||
stop: ["\n### Human:"], // when detect this, stop completion
|
||||
exclude: ["### Assistant:"], // no show in the completion
|
||||
threads: 8,
|
||||
as_loop: true, // use this to request the completion token by token
|
||||
interactive: true, // enable the detection of a stop word
|
||||
});
|
||||
|
||||
// create a loop to receive every token predicted
|
||||
// note: this operation is blocking, avoid using it in a UI thread
|
||||
|
||||
let message = "";
|
||||
while (true) {
|
||||
// you can stop the inference adding '?stop=true' like this http://127.0.0.1:8080/next-token?stop=true
|
||||
result = await axios.get("http://127.0.0.1:8080/next-token");
|
||||
process.stdout.write(result.data.content);
|
||||
message += result.data.content;
|
||||
|
||||
// to avoid an infinite loop
|
||||
if (result.data.stop) {
|
||||
console.log("Completed");
|
||||
// make sure to add the completion to the prompt.
|
||||
prompt += `### Assistant: ${message}`;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This function should be called every time a question to the model is needed.
|
||||
async function Test() {
|
||||
// the server can't run inference in parallel
|
||||
await ChatCompletion("Write a long story about a time magician in a fantasy world");
|
||||
await ChatCompletion("Summary the story");
|
||||
}
|
||||
|
||||
Test();
|
||||
```sh
|
||||
node chat.mjs
|
||||
```
|
||||
|
||||
### Alpaca example
|
||||
Another sample in [chat.sh](chat.sh).
|
||||
Requires [bash](https://www.gnu.org/software/bash/), [curl](https://curl.se) and [jq](https://jqlang.github.io/jq/).
|
||||
Run with bash:
|
||||
|
||||
**Temporary note:** not tested; if you have the model, please test it and report any issues.
|
||||
|
||||
```javascript
|
||||
const axios = require("axios");
|
||||
|
||||
let prompt = `Below is an instruction that describes a task. Write a response that appropriately completes the request.
|
||||
`;
|
||||
|
||||
async function DoInstruction(instruction) {
|
||||
prompt += `\n\n### Instruction:\n\n${instruction}\n\n### Response:\n\n`;
|
||||
result = await axios.post("http://127.0.0.1:8080/completion", {
|
||||
prompt,
|
||||
batch_size: 128,
|
||||
temperature: 0.2,
|
||||
top_k: 40,
|
||||
top_p: 0.9,
|
||||
n_keep: -1,
|
||||
n_predict: 2048,
|
||||
stop: ["### Instruction:\n\n"], // when detect this, stop completion
|
||||
exclude: [], // no show in the completion
|
||||
threads: 8,
|
||||
as_loop: true, // use this to request the completion token by token
|
||||
interactive: true, // enable the detection of a stop word
|
||||
});
|
||||
|
||||
// create a loop to receive every token predicted
|
||||
// note: this operation is blocking, avoid using it in a UI thread
|
||||
|
||||
let message = "";
|
||||
while (true) {
|
||||
result = await axios.get("http://127.0.0.1:8080/next-token");
|
||||
process.stdout.write(result.data.content);
|
||||
message += result.data.content;
|
||||
|
||||
// to avoid an infinite loop
|
||||
if (result.data.stop) {
|
||||
console.log("Completed");
|
||||
// make sure to add the completion and the user's next question to the prompt.
|
||||
prompt += message;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This function should be called every time an instruction to the model is needed.
|
||||
DoInstruction("Destroy the world"); // as a joke
|
||||
```sh
|
||||
bash chat.sh
|
||||
```
|
||||
|
||||
### Embeddings
|
||||
|
||||
First, run the server with `--embedding` option:
|
||||
|
||||
```bash
|
||||
server -m models/7B/ggml-model.bin --ctx_size 2048 --embedding
|
||||
```
|
||||
|
||||
Run this code in NodeJS:
|
||||
|
||||
```javascript
|
||||
const axios = require('axios');
|
||||
|
||||
async function Test() {
|
||||
let result = await axios.post("http://127.0.0.1:8080/embedding", {
|
||||
content: `Hello`,
|
||||
threads: 5
|
||||
});
|
||||
// print the embedding array
|
||||
console.log(result.data.embedding);
|
||||
}
|
||||
|
||||
Test();
|
||||
```
|
||||
|
||||
### Tokenize
|
||||
|
||||
Run this code in NodeJS:
|
||||
|
||||
```javascript
|
||||
const axios = require('axios');
|
||||
|
||||
async function Test() {
|
||||
let result = await axios.post("http://127.0.0.1:8080/tokenize", {
|
||||
content: `Hello`
|
||||
});
|
||||
// print the tokens array
|
||||
console.log(result.data.tokens);
|
||||
}
|
||||
|
||||
Test();
|
||||
```
|
||||
|
||||
## Common Options
|
||||
|
||||
- `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
|
||||
- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
|
||||
- `-ngl N, --n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
|
||||
- `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. Requires cuBLAS.
|
||||
- `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance. Requires cuBLAS.
|
||||
- `-lv, --low-vram`: Do not allocate a VRAM scratch buffer for holding temporary results. Reduces VRAM usage at the cost of performance, particularly prompt processing speed. Requires cuBLAS.
|
||||
- `--embedding`: Enable the embedding mode. **Completion function doesn't work in this mode**.
|
||||
- `--host`: Set the hostname or IP address to listen on. Default: `127.0.0.1`.
- `--port`: Set the port to listen on. Default: `8080`.
|
||||
|
||||
### RNG Seed
|
||||
|
||||
- `-s SEED, --seed SEED`: Set the random number generator (RNG) seed (default: -1, < 0 = random seed).
|
||||
|
||||
The RNG seed is used to initialize the random number generator that influences the text generation process. By setting a specific seed value, you can obtain consistent and reproducible results across multiple runs with the same input and settings. This can be helpful for testing, debugging, or comparing the effects of different options on the generated text to see when they diverge. If the seed is set to a value less than 0, a random seed will be used, which will result in different outputs on each run.
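For example (a hypothetical invocation), starting the server with a fixed seed gives reproducible results across runs with the same input and settings:

```bash
# Same model, same settings, same seed, same prompt => same completion on every run
./server -m models/7B/ggml-model.bin -s 1234
```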
|
||||
|
||||
## Performance Tuning and Memory Options
|
||||
|
||||
### No Memory Mapping
|
||||
|
||||
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed. However, if the model is larger than your total amount of RAM or if your system is low on available memory, using mmap might increase the risk of pageouts, negatively impacting performance.
|
||||
|
||||
### Memory Float 32
|
||||
|
||||
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. This doubles the context memory requirement but does not appear to increase generation quality in a measurable way. Not recommended.
|
||||
|
||||
## Limitations

- The current implementation of llama.cpp would need a `llama-state` to handle multiple contexts and clients, but this could require more powerful hardware.
|
||||
|
|
89
examples/server/chat.mjs
Normal file
89
examples/server/chat.mjs
Normal file
|
@ -0,0 +1,89 @@
|
|||
import * as readline from 'node:readline'
|
||||
import { stdin, stdout } from 'node:process'
|
||||
|
||||
const API_URL = 'http://127.0.0.1:8080'
|
||||
|
||||
const chat = [
|
||||
{
|
||||
human: "Hello, Assistant.",
|
||||
assistant: "Hello. How may I help you today?"
|
||||
},
|
||||
{
|
||||
human: "Please tell me the largest city in Europe.",
|
||||
assistant: "Sure. The largest city in Europe is Moscow, the capital of Russia."
|
||||
},
|
||||
]
|
||||
|
||||
const instruction = `A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.`
|
||||
|
||||
function format_prompt(question) {
|
||||
return `${instruction}\n${
|
||||
chat.map(m =>`### Human: ${m.human}\n### Assistant: ${m.assistant}`).join("\n")
|
||||
}\n### Human: ${question}\n### Assistant:`
|
||||
}
|
||||
|
||||
async function tokenize(content) {
|
||||
const result = await fetch(`${API_URL}/tokenize`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ content })
|
||||
})
|
||||
|
||||
if (!result.ok) {
|
||||
return []
|
||||
}
|
||||
|
||||
return (await result.json()).tokens
|
||||
}
|
||||
|
||||
const n_keep = (await tokenize(instruction)).length
|
||||
|
||||
async function chat_completion(question) {
|
||||
const result = await fetch(`${API_URL}/completion`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
prompt: format_prompt(question),
|
||||
temperature: 0.2,
|
||||
top_k: 40,
|
||||
top_p: 0.9,
|
||||
n_keep: n_keep,
|
||||
n_predict: 256,
|
||||
stop: ["\n### Human:"], // stop completion after generating this
|
||||
stream: true,
|
||||
})
|
||||
})
|
||||
|
||||
if (!result.ok) {
|
||||
return
|
||||
}
|
||||
|
||||
let answer = ''
|
||||
|
||||
for await (var chunk of result.body) {
|
||||
const t = Buffer.from(chunk).toString('utf8')
|
||||
if (t.startsWith('data: ')) {
|
||||
const message = JSON.parse(t.substring(6))
|
||||
answer += message.content
|
||||
process.stdout.write(message.content)
|
||||
if (message.stop) {
|
||||
if (message.truncated) {
|
||||
chat.shift()
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
process.stdout.write('\n')
|
||||
chat.push({ human: question, assistant: answer.trimStart() })
|
||||
}
|
||||
|
||||
const rl = readline.createInterface({ input: stdin, output: stdout });
|
||||
|
||||
const readlineQuestion = (rl, query, options) => new Promise((resolve, reject) => {
|
||||
rl.question(query, options, resolve)
|
||||
});
|
||||
|
||||
while(true) {
|
||||
const question = await readlineQuestion(rl, '> ')
|
||||
await chat_completion(question)
|
||||
}
|
77
examples/server/chat.sh
Normal file
77
examples/server/chat.sh
Normal file
|
@ -0,0 +1,77 @@
|
|||
#!/bin/bash
|
||||
|
||||
API_URL="${API_URL:-http://127.0.0.1:8080}"
|
||||
|
||||
CHAT=(
|
||||
"Hello, Assistant."
|
||||
"Hello. How may I help you today?"
|
||||
"Please tell me the largest city in Europe."
|
||||
"Sure. The largest city in Europe is Moscow, the capital of Russia."
|
||||
)
|
||||
|
||||
INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
|
||||
|
||||
trim() {
|
||||
shopt -s extglob
|
||||
set -- "${1##+([[:space:]])}"
|
||||
printf "%s" "${1%%+([[:space:]])}"
|
||||
}
|
||||
|
||||
trim_trailing() {
|
||||
shopt -s extglob
|
||||
printf "%s" "${1%%+([[:space:]])}"
|
||||
}
|
||||
|
||||
format_prompt() {
|
||||
echo -n "${INSTRUCTION}"
|
||||
printf "\n### Human: %s\n### Assistant: %s" "${CHAT[@]}" "$1"
|
||||
}
|
||||
|
||||
tokenize() {
|
||||
curl \
|
||||
--silent \
|
||||
--request POST \
|
||||
--url "${API_URL}/tokenize" \
|
||||
--data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \
|
||||
| jq '.tokens[]'
|
||||
}
|
||||
|
||||
N_KEEP=$(tokenize "${INSTRUCTION}" | wc -l)
|
||||
|
||||
chat_completion() {
|
||||
PROMPT="$(trim_trailing "$(format_prompt "$1")")"
|
||||
DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{
|
||||
prompt: .,
|
||||
temperature: 0.2,
|
||||
top_k: 40,
|
||||
top_p: 0.9,
|
||||
n_keep: $n_keep,
|
||||
n_predict: 256,
|
||||
stop: ["\n### Human:"],
|
||||
stream: true
|
||||
}')"
|
||||
|
||||
ANSWER=''
|
||||
|
||||
while IFS= read -r LINE; do
|
||||
if [[ $LINE = data:* ]]; then
|
||||
CONTENT="$(echo "${LINE:5}" | jq -r '.content')"
|
||||
printf "%s" "${CONTENT}"
|
||||
ANSWER+="${CONTENT}"
|
||||
fi
|
||||
done < <(curl \
|
||||
--silent \
|
||||
--no-buffer \
|
||||
--request POST \
|
||||
--url "${API_URL}/completion" \
|
||||
--data-raw "${DATA}")
|
||||
|
||||
printf "\n"
|
||||
|
||||
CHAT+=("$1" "$(trim "$ANSWER")")
|
||||
}
|
||||
|
||||
while true; do
|
||||
read -r -e -p "> " QUESTION
|
||||
chat_completion "${QUESTION}"
|
||||
done
|
File diff suppressed because it is too large
|
@ -4,7 +4,7 @@ Basic usage instructions:
|
|||
|
||||
```bash
|
||||
# get training data
|
||||
wget https://github.com/brunoklein99/deep-learning-notes/blob/master/shakespeare.txt
|
||||
wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/shakespeare.txt
|
||||
|
||||
# train
|
||||
./bin/train-text-from-scratch \
|
||||
|
|
13
flake.nix
13
flake.nix
|
@ -48,6 +48,19 @@
|
|||
'';
|
||||
meta.mainProgram = "llama";
|
||||
};
|
||||
apps.llama-server = {
|
||||
type = "app";
|
||||
program = "${self.packages.${system}.default}/bin/llama-server";
|
||||
};
|
||||
apps.llama-embedding = {
|
||||
type = "app";
|
||||
program = "${self.packages.${system}.default}/bin/embedding";
|
||||
};
|
||||
apps.llama = {
|
||||
type = "app";
|
||||
program = "${self.packages.${system}.default}/bin/llama";
|
||||
};
|
||||
apps.default = self.apps.${system}.llama;
|
||||
devShells.default = pkgs.mkShell {
|
||||
packages = with pkgs; [
|
||||
cmake
|
||||
|
|
58
ggml-cuda.cu
58
ggml-cuda.cu
|
@ -13,6 +13,10 @@
|
|||
#include "ggml-cuda.h"
|
||||
#include "ggml.h"
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||
#endif
|
||||
|
||||
static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size");
|
||||
|
||||
#define CUDA_CHECK(err) \
|
||||
|
@ -1463,19 +1467,13 @@ static void * g_scratch_buffer = nullptr;
|
|||
static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
|
||||
static size_t g_scratch_offset = 0;
|
||||
|
||||
#define GGML_CUDA_MAX_STREAMS 8 // Set this to 1 for reproducible matrix multiplication.
|
||||
#define GGML_CUDA_MAX_EVENTS 64
|
||||
|
||||
static int g_device_count = -1;
|
||||
static int g_main_device = 0;
|
||||
static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
|
||||
|
||||
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
|
||||
|
||||
static cudaStream_t g_cudaStreams_main[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { nullptr };
|
||||
|
||||
static cudaStream_t g_cudaStreams_memcpy_src1[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { nullptr };
|
||||
static cudaEvent_t g_cudaEvents_memcpy_src1[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_EVENTS] = { nullptr };
|
||||
static cudaStream_t g_cudaStreams_main[GGML_CUDA_MAX_DEVICES] = { nullptr };
|
||||
|
||||
void ggml_init_cublas() {
|
||||
static bool initialized = false;
|
||||
|
@ -1499,15 +1497,8 @@ void ggml_init_cublas() {
|
|||
for (int id = 0; id < g_device_count; ++id) {
|
||||
CUDA_CHECK(cudaSetDevice(id));
|
||||
|
||||
// create streams
|
||||
for (int i = 0; i < GGML_CUDA_MAX_STREAMS; ++i) {
|
||||
CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_main[id][i], cudaStreamNonBlocking));
|
||||
CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_memcpy_src1[id][i], cudaStreamNonBlocking));
|
||||
}
|
||||
// create events
|
||||
for (int i = 0; i < GGML_CUDA_MAX_EVENTS; ++i) {
|
||||
CUDA_CHECK(cudaEventCreateWithFlags(&g_cudaEvents_memcpy_src1[id][i], cudaEventDisableTiming));
|
||||
}
|
||||
// create main stream
|
||||
CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_main[id], cudaStreamNonBlocking));
|
||||
|
||||
// create cublas handle
|
||||
CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id]));
|
||||
|
@ -1974,6 +1965,12 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
|
|||
size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
|
||||
size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
|
||||
|
||||
// if multiple GPUs are used they need to wait for the main GPU to finish
|
||||
if (split && g_device_count > 1) {
|
||||
CUDA_CHECK(cudaSetDevice(g_main_device));
|
||||
CUDA_CHECK(cudaDeviceSynchronize());
|
||||
}
|
||||
|
||||
for (int id = 0; id < g_device_count; ++id) {
|
||||
if (!split && id != g_main_device) {
|
||||
continue;
|
||||
|
@ -2072,9 +2069,7 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
|
|||
}
|
||||
const int64_t i11 = i13*ne12 + i12;
|
||||
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[id][i0 % GGML_CUDA_MAX_STREAMS];
|
||||
cudaStream_t cudaStream_memcpy_src1 = g_cudaStreams_memcpy_src1[id][i0 % GGML_CUDA_MAX_STREAMS];
|
||||
cudaEvent_t cudaEvent_memcpy_src1 = g_cudaEvents_memcpy_src1[id][i0 % GGML_CUDA_MAX_EVENTS];
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[id];
|
||||
|
||||
// for split tensors the data begins at i0 == i0_offset_low
|
||||
char * src0_ddq_i = src0_ddq[id] + (i0 - i0_offset_low)*src0_stride*src0_ts/src0_bs;
|
||||
|
@ -2102,14 +2097,14 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
|
|||
if (src1->backend == GGML_BACKEND_CPU) {
|
||||
GGML_ASSERT(!flatten_rows || nrows0 == ggml_nrows(src1));
|
||||
int64_t nrows1 = flatten_rows ? nrows0 : ne11;
|
||||
CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, nrows1, cudaStream_memcpy_src1));
|
||||
CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, nrows1, cudaStream_main));
|
||||
} else if (src1->backend == GGML_BACKEND_GPU && src1_is_contiguous) {
|
||||
if (id != g_main_device) {
|
||||
GGML_ASSERT(!flatten_rows);
|
||||
float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device];
|
||||
src1_ddf_i_source += i11*src1_stride;
|
||||
CUDA_CHECK(cudaMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_stride*sizeof(float),
|
||||
cudaMemcpyDeviceToDevice, cudaStream_memcpy_src1));
|
||||
cudaMemcpyDeviceToDevice, cudaStream_main));
|
||||
}
|
||||
} else if (src1_on_device && !src1_is_contiguous) {
|
||||
GGML_ASSERT(!split);
|
||||
|
@ -2118,7 +2113,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
|
|||
GGML_ASSERT(false);
|
||||
}
|
||||
}
|
||||
CUDA_CHECK(cudaEventRecord(cudaEvent_memcpy_src1, cudaStream_memcpy_src1));
|
||||
|
||||
if (!src0_on_device || !src0_is_contiguous) {
|
||||
if (src0_is_f32) {
|
||||
|
@ -2134,9 +2128,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
|
|||
CUDA_CHECK(cudaGetLastError());
|
||||
}
|
||||
|
||||
// wait with main stream until src1 memcpy is done
|
||||
CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, cudaEvent_memcpy_src1, 0));
|
||||
|
||||
// do the computation
|
||||
op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
|
||||
|
||||
|
@ -2174,8 +2165,13 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
|
|||
|
||||
// wait until each device is finished, then free their buffers
|
||||
for (int id = 0; id < g_device_count; ++id) {
|
||||
if (src0_asq[id] == 0 && src0_asf[id] == 0 && src1_asf[id] == 0 && dst_asf[id] == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CUDA_CHECK(cudaSetDevice(id));
|
||||
CUDA_CHECK(cudaDeviceSynchronize());
|
||||
|
||||
if (src0_asq[id] > 0) {
|
||||
ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
|
||||
}
|
||||
|
@ -2241,7 +2237,7 @@ void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tensor * sr
|
|||
const int64_t ne02 = src0->ne[2];
|
||||
|
||||
CUDA_CHECK(cudaSetDevice(g_main_device));
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device][0];
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
|
||||
|
||||
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
|
||||
void * src0_ddq = src0_extra->data_device[g_main_device];
|
||||
|
@ -2253,8 +2249,6 @@ void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tensor * sr
|
|||
float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
|
||||
|
||||
ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, cudaStream_main);
|
||||
|
||||
CUDA_CHECK(cudaDeviceSynchronize());
|
||||
}
|
||||
|
||||
void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
|
||||
|
@ -2272,7 +2266,7 @@ void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1
|
|||
const int64_t nb02 = src0->nb[2];
|
||||
|
||||
CUDA_CHECK(cudaSetDevice(g_main_device));
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device][0];
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
|
||||
|
||||
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
|
||||
void * src0_ddq = src0_extra->data_device[g_main_device];
|
||||
|
@ -2287,8 +2281,6 @@ void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1
|
|||
const int channel_stride_x = nb02 / sizeof(half);
|
||||
|
||||
ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, channel_stride_x, cudaStream_main);
|
||||
|
||||
CUDA_CHECK(cudaDeviceSynchronize());
|
||||
}
|
||||
|
||||
void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
|
||||
|
@ -2344,7 +2336,7 @@ void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tens
|
|||
const int64_t nb12 = src1->nb[2];
|
||||
|
||||
CUDA_CHECK(cudaSetDevice(g_main_device));
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device][0];
|
||||
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
|
||||
|
||||
const struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
|
||||
const struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
|
||||
|
@ -2362,8 +2354,6 @@ void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tens
|
|||
GGML_ASSERT(false);
|
||||
}
|
||||
|
||||
CUDA_CHECK(cudaDeviceSynchronize());
|
||||
|
||||
(void) dst;
|
||||
}
|
||||
|
||||
|
|
|
@ -41,12 +41,15 @@ void ggml_metal_free(struct ggml_metal_context * ctx);
|
|||
// - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute
|
||||
// - the mapping is used during computation to determine the arguments of the compute kernels
|
||||
// - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
|
||||
// - max_size specifies the maximum size of a tensor and is used to create shared views such
|
||||
// that it is guaranteed that the tensor will fit in at least one of the views
|
||||
//
|
||||
bool ggml_metal_add_buffer(
|
||||
struct ggml_metal_context * ctx,
|
||||
const char * name,
|
||||
void * data,
|
||||
size_t size);
|
||||
size_t size,
|
||||
size_t max_size);
|
||||
|
||||
// set data from host memory into the device
|
||||
void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
|
||||
|
|
164
ggml-metal.m
164
ggml-metal.m
|
@ -57,6 +57,7 @@ struct ggml_metal_context {
|
|||
GGML_METAL_DECL_KERNEL(get_rows_q5_k);
|
||||
GGML_METAL_DECL_KERNEL(get_rows_q6_k);
|
||||
GGML_METAL_DECL_KERNEL(rms_norm);
|
||||
GGML_METAL_DECL_KERNEL(norm);
|
||||
GGML_METAL_DECL_KERNEL(mul_mat_f16_f32);
|
||||
GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
|
||||
GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
|
||||
|
@ -66,8 +67,10 @@ struct ggml_metal_context {
|
|||
GGML_METAL_DECL_KERNEL(mul_mat_q5_k_f32);
|
||||
GGML_METAL_DECL_KERNEL(mul_mat_q6_k_f32);
|
||||
GGML_METAL_DECL_KERNEL(rope);
|
||||
GGML_METAL_DECL_KERNEL(alibi_f32);
|
||||
GGML_METAL_DECL_KERNEL(cpy_f32_f16);
|
||||
GGML_METAL_DECL_KERNEL(cpy_f32_f32);
|
||||
GGML_METAL_DECL_KERNEL(cpy_f16_f16);
|
||||
|
||||
#undef GGML_METAL_DECL_KERNEL
|
||||
};
|
||||
|
@ -162,6 +165,7 @@ struct ggml_metal_context * ggml_metal_init(void) {
|
|||
GGML_METAL_ADD_KERNEL(get_rows_q5_k);
|
||||
GGML_METAL_ADD_KERNEL(get_rows_q6_k);
|
||||
GGML_METAL_ADD_KERNEL(rms_norm);
|
||||
GGML_METAL_ADD_KERNEL(norm);
|
||||
GGML_METAL_ADD_KERNEL(mul_mat_f16_f32);
|
||||
GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
|
||||
GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
|
||||
|
@ -171,12 +175,22 @@ struct ggml_metal_context * ggml_metal_init(void) {
|
|||
GGML_METAL_ADD_KERNEL(mul_mat_q5_k_f32);
|
||||
GGML_METAL_ADD_KERNEL(mul_mat_q6_k_f32);
|
||||
GGML_METAL_ADD_KERNEL(rope);
|
||||
GGML_METAL_ADD_KERNEL(alibi_f32);
|
||||
GGML_METAL_ADD_KERNEL(cpy_f32_f16);
|
||||
GGML_METAL_ADD_KERNEL(cpy_f32_f32);
|
||||
GGML_METAL_ADD_KERNEL(cpy_f16_f16);
|
||||
|
||||
#undef GGML_METAL_ADD_KERNEL
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
|
||||
fprintf(stderr, "%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
|
||||
if (ctx->device.maxTransferRate != 0) {
|
||||
fprintf(stderr, "%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
|
||||
} else {
|
||||
fprintf(stderr, "%s: maxTransferRate = built-in GPU\n", __func__);
|
||||
}
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
|
@ -193,10 +207,13 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
|
|||
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) {
|
||||
//fprintf(stderr, "%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);
|
||||
|
||||
const int64_t tsize = ggml_nbytes(t);
|
||||
|
||||
// find the view that contains the tensor fully
|
||||
for (int i = 0; i < ctx->n_buffers; ++i) {
|
||||
const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data;
|
||||
|
||||
if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) {
|
||||
if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) {
|
||||
*offs = (size_t) ioffs;
|
||||
|
||||
//fprintf(stderr, "%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs);
|
||||
|
@ -214,7 +231,8 @@ bool ggml_metal_add_buffer(
|
|||
struct ggml_metal_context * ctx,
|
||||
const char * name,
|
||||
void * data,
|
||||
size_t size) {
|
||||
size_t size,
|
||||
size_t max_size) {
|
||||
if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) {
|
||||
fprintf(stderr, "%s: too many buffers\n", __func__);
|
||||
return false;
|
||||
|
@@ -231,31 +249,69 @@ bool ggml_metal_add_buffer(
            }
        }

        size_t page_size = getpagesize();
        size_t aligned_size = size;
        if ((aligned_size % page_size) != 0) {
            aligned_size += (page_size - (aligned_size % page_size));
        const size_t size_page = getpagesize();

        size_t size_aligned = size;
        if ((size_aligned % size_page) != 0) {
            size_aligned += (size_page - (size_aligned % size_page));
        }

        // the buffer fits into the max buffer size allowed by the device
        if (size_aligned <= ctx->device.maxBufferLength) {
            ctx->buffers[ctx->n_buffers].name = name;
            ctx->buffers[ctx->n_buffers].data = data;
            ctx->buffers[ctx->n_buffers].size = size;

        if (ctx->device.maxBufferLength < aligned_size) {
            fprintf(stderr, "%s: buffer '%s' size %zu is larger than buffer maximum of %zu\n", __func__, name, aligned_size, ctx->device.maxBufferLength);
            return false;
        }
        ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:aligned_size options:MTLResourceStorageModeShared deallocator:nil];
            ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];

            if (ctx->buffers[ctx->n_buffers].metal == nil) {
                fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, aligned_size / 1024.0 / 1024.0);
                fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0);
                return false;
            }

            fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0);

            ++ctx->n_buffers;
        } else {
        fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB\n", __func__, name, aligned_size / 1024.0 / 1024.0);
            // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
            // one of the views
            const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
            const size_t size_step = ctx->device.maxBufferLength - size_ovlp;
            const size_t size_view = ctx->device.maxBufferLength;

            for (size_t i = 0; i < size; i += size_step) {
                const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);

                ctx->buffers[ctx->n_buffers].name = name;
                ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
                ctx->buffers[ctx->n_buffers].size = size_step_aligned;

                ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];

                if (ctx->buffers[ctx->n_buffers].metal == nil) {
                    fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0);
                    return false;
                }

                fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i);
                if (i + size_step < size) {
                    fprintf(stderr, "\n");
                }

                ++ctx->n_buffers;
            }
        }

        fprintf(stderr, ", (%8.2f / %8.2f)",
                ctx->device.currentAllocatedSize / 1024.0 / 1024.0,
                ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

        if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) {
            fprintf(stderr, ", warning: current allocated size is greater than the recommended max working set size\n");
        } else {
            fprintf(stderr, "\n");
        }
    }

    return true;
}
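
The view-splitting arithmetic above is the core of this change: when a region is larger than the device's maxBufferLength, it is covered by views that overlap by slightly more than the largest tensor, so every tensor lies fully inside at least one view. A minimal plain-C sketch of the same layout computation, not part of the commit; all sizes below are illustrative (the real values come from ctx->device.maxBufferLength, getpagesize() and ggml_get_max_tensor_size()):

```c
#include <stdio.h>
#include <stdint.h>

// Illustrative sketch of the overlapping buffer-view layout used above.
int main(void) {
    const size_t size        = 40ull * 1024 * 1024 * 1024; // hypothetical 40 GB weight region
    const size_t max_size    =  1ull * 1024 * 1024 * 1024; // hypothetical largest tensor: 1 GB
    const size_t max_buf_len = 32ull * 1024 * 1024 * 1024; // hypothetical device maxBufferLength
    const size_t size_page   = 16384;                      // hypothetical page size

    size_t size_aligned = size;
    if (size_aligned % size_page != 0) {
        size_aligned += size_page - (size_aligned % size_page);
    }

    const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round up, plus one extra page
    const size_t size_step = max_buf_len - size_ovlp; // distance between view starts
    const size_t size_view = max_buf_len;             // nominal size of each view

    for (size_t i = 0; i < size; i += size_step) {
        const size_t len = (i + size_view <= size) ? size_view : (size_aligned - i);
        printf("view at offset %zu, length %zu\n", i, len);
    }
    return 0;
}
```
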
@@ -735,6 +791,70 @@ void ggml_metal_graph_compute(

                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_NORM:
            {
                if (encoder == nil) {
                    encoder = [command_buffer computeCommandEncoder];
                }

                const float eps = 1e-5f;

                const int nth = 256;

                [encoder setComputePipelineState:ctx->pipeline_norm];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                [encoder setBytes:&eps length:sizeof( float) atIndex:4];
                [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];

                const int64_t nrows = ggml_nrows(src0);

                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_ALIBI:
            {
                if (encoder == nil) {
                    encoder = [command_buffer computeCommandEncoder];
                }

                GGML_ASSERT((src0t == GGML_TYPE_F32));

                const int n_past = ((int32_t *) src1->data)[0]; UNUSED(n_past);
                const int n_head = ((int32_t *) src1->data)[1];
                const float max_bias = ((float *) src1->data)[2];

                if (__builtin_popcount(n_head) != 1) {
                    GGML_ASSERT(false && "only power-of-two n_head implemented");
                }

                const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
                const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);

                [encoder setComputePipelineState:ctx->pipeline_alibi_f32];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
                [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
                [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
                [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
                [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
                [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
                [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
                [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
                [encoder setBytes:&m0 length:sizeof( float) atIndex:18];
                const int nth = 32;
                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_ROPE:
            {
                if (encoder == nil) {
@@ -788,6 +908,14 @@ void ggml_metal_graph_compute(
                            default: GGML_ASSERT(false && "not implemented");
                        };
                } break;
        case GGML_TYPE_F16:
                {
                    switch (dstt) {
                        case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f16_f16]; break;
                        case GGML_TYPE_F32: GGML_ASSERT(false && "cpy_f16_f32 not implemented"); break;
                        default: GGML_ASSERT(false && "not implemented");
                    };
                } break;
        default: GGML_ASSERT(false && "not implemented");
    }

@@ -831,4 +959,14 @@ void ggml_metal_graph_compute(
    dispatch_barrier_sync(queue, ^{});

    [command_buffers[n_cb - 1] waitUntilCompleted];

    // check status of command buffers
    // needed to detect if the device ran out-of-memory for example (#1881)
    for (int i = 0; i < n_cb; i++) {
        MTLCommandBufferStatus status = (MTLCommandBufferStatus) [command_buffers[i] status];
        if (status != MTLCommandBufferStatusCompleted) {
            fprintf(stderr, "%s: command buffer %d failed with status %lu\n", __func__, i, status);
            GGML_ASSERT(false);
        }
    }
}

149
ggml-metal.metal
@@ -256,6 +256,72 @@ kernel void kernel_get_rows_q4_1(
        (device float *) ((device char *) dst + i*nb1), ne00);
}

kernel void kernel_norm(
        device const void * src0,
        device float * dst,
        constant int64_t & ne00,
        constant uint64_t & nb01,
        constant float & eps,
        threadgroup float * sum [[threadgroup(0)]],
        uint tgpig[[threadgroup_position_in_grid]],
        uint tpitg[[thread_position_in_threadgroup]],
        uint ntg[[threads_per_threadgroup]]) {
    device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01);
    // MEAN
    // parallel sum
    sum[tpitg] = 0.0f;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        sum[tpitg] += x[i00];
    }
    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg/2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }
    // broadcast
    if (tpitg == 0) {
        sum[0] /= ne00;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    const float mean = sum[0];

    // recenter
    device float * y = dst + tgpig*ne00;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        y[i00] = x[i00] - mean;
    }

    // VARIANCE
    // parallel sum
    sum[tpitg] = 0.0f;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        sum[tpitg] += y[i00] * y[i00];
    }
    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg/2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }
    // broadcast
    if (tpitg == 0) {
        sum[0] /= ne00;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    const float variance = sum[0];

    const float scale = 1.0f/sqrt(variance + eps);
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        y[i00] = y[i00] * scale;
    }
}


kernel void kernel_rms_norm(
        device const void * src0,
        device float * dst,
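
For reference, the new kernel_norm computes a standard (non-RMS) layer normalization of each row: subtract the row mean, then scale by 1/sqrt(variance + eps). A minimal single-threaded C sketch of the same math, not part of the commit (the Metal kernel distributes the two sums across a threadgroup and reduces them in shared memory):

```c
#include <math.h>
#include <stddef.h>

// Plain-C reference of the per-row normalization performed by kernel_norm above.
static void norm_row(const float * x, float * y, size_t n, float eps) {
    float mean = 0.0f;
    for (size_t i = 0; i < n; i++) {
        mean += x[i];
    }
    mean /= n;

    float variance = 0.0f;
    for (size_t i = 0; i < n; i++) {
        const float v = x[i] - mean;
        y[i] = v;           // recenter
        variance += v * v;
    }
    variance /= n;

    const float scale = 1.0f / sqrtf(variance + eps);
    for (size_t i = 0; i < n; i++) {
        y[i] *= scale;
    }
}
```
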
@@ -485,6 +551,48 @@ kernel void kernel_mul_mat_f16_f32(
    }
}

kernel void kernel_alibi_f32(
        device const float * src0,
        device float * dst,
        constant int64_t & ne00,
        constant int64_t & ne01,
        constant int64_t & ne02,
        constant int64_t & ne03,
        constant uint64_t & nb00,
        constant uint64_t & nb01,
        constant uint64_t & nb02,
        constant uint64_t & nb03,
        constant int64_t & ne0,
        constant int64_t & ne1,
        constant int64_t & ne2,
        constant int64_t & ne3,
        constant uint64_t & nb0,
        constant uint64_t & nb1,
        constant uint64_t & nb2,
        constant uint64_t & nb3,
        constant float & m0,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3 ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
    float m_k = pow(m0, i2 + 1);
    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0] + m_k * (i00 - ne00 + 1);
    }
}

kernel void kernel_rope(
        device const void * src0,
        device float * dst,
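
The ALiBi kernel adds a per-head linear bias m_k * (i00 - ne00 + 1) along each row (zero at the last position, increasingly negative toward earlier positions), where the base slope m0 = 2^(-max_bias / n_heads_log2_floor) is computed on the host in the GGML_OP_ALIBI case above and m_k = m0^(head + 1) in the kernel. A small C sketch of the slope computation, not part of the commit; head count and max_bias below are illustrative (the kernel, like the host code, assumes a power-of-two head count):

```c
#include <math.h>
#include <stdio.h>

// Sketch of the ALiBi slope computation used above (power-of-two n_head only).
int main(void) {
    const int   n_head   = 32;   // hypothetical number of attention heads
    const float max_bias = 8.0f; // typical ALiBi max bias (from the ALiBi paper)

    const int   n_heads_log2_floor = 1 << (int) floor(log2(n_head));
    const float m0 = powf(2.0f, -max_bias / n_heads_log2_floor);

    for (int h = 0; h < n_head; h++) {
        const float m_k = powf(m0, h + 1); // slope for head h (i2 in the kernel)
        printf("head %2d: slope = %g\n", h, m_k);
        // element i of the row then receives bias m_k * (i - ne00 + 1)
    }
    return 0;
}
```
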
@@ -540,6 +648,47 @@ kernel void kernel_rope(
    }
}

kernel void kernel_cpy_f16_f16(
        device const half * src0,
        device half * dst,
        constant int64_t & ne00,
        constant int64_t & ne01,
        constant int64_t & ne02,
        constant int64_t & ne03,
        constant uint64_t & nb00,
        constant uint64_t & nb01,
        constant uint64_t & nb02,
        constant uint64_t & nb03,
        constant int64_t & ne0,
        constant int64_t & ne1,
        constant int64_t & ne2,
        constant int64_t & ne3,
        constant uint64_t & nb0,
        constant uint64_t & nb1,
        constant uint64_t & nb2,
        constant uint64_t & nb3,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3 ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0];
    }
}

kernel void kernel_cpy_f32_f16(
        device const float * src0,
        device half * dst,

@@ -15,6 +15,10 @@

#include "ggml.h"

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#define CL_DMMV_BLOCK_SIZE 32

#define MULTILINE_QUOTE(...) #__VA_ARGS__

24
ggml.c
@@ -4154,14 +4154,34 @@ void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
    ctx->no_alloc = no_alloc;
}

void * ggml_get_mem_buffer(struct ggml_context * ctx) {
void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
    return ctx->mem_buffer;
}

size_t ggml_get_mem_size(struct ggml_context * ctx) {
size_t ggml_get_mem_size(const struct ggml_context * ctx) {
    return ctx->mem_size;
}

size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
    size_t max_size = 0;

    struct ggml_object * obj = ctx->objects_begin;

    while (obj != NULL) {
        struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);

        const size_t size = ggml_nbytes(tensor);

        if (max_size < size) {
            max_size = size;
        }

        obj = obj->next;
    }

    return max_size;
}

// IMPORTANT:
// when creating "opt" tensors, always save and load the scratch buffer
// this is an error prone process, but it is necessary to support inplace
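
The new ggml_get_max_tensor_size() walks the context's object list and reports the largest tensor in bytes; llama.cpp (further below) feeds that value to the Metal backend so adjacent buffer views overlap by enough to contain any single tensor. A minimal usage sketch, not part of the commit; the context setup and tensor shapes are illustrative:

```c
#include <stdio.h>
#include "ggml.h"

// Illustrative only: create a small context, add a couple of tensors, and query the
// largest tensor size, as llama.cpp does for the model weights before calling
// ggml_metal_add_buffer(..., max_size).
int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1024, 64);

    printf("max tensor size = %zu bytes\n", ggml_get_max_tensor_size(ctx));

    ggml_free(ctx);
    return 0;
}
```
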
5
ggml.h
@@ -500,8 +500,9 @@ extern "C" {
    GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
    GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

    GGML_API void * ggml_get_mem_buffer(struct ggml_context * ctx);
    GGML_API size_t ggml_get_mem_size (struct ggml_context * ctx);
    GGML_API void * ggml_get_mem_buffer (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_mem_size (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);

    GGML_API struct ggml_tensor * ggml_new_tensor(
            struct ggml_context * ctx,

42
llama.cpp
@@ -19,6 +19,11 @@
#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif
#ifdef GGML_USE_K_QUANTS
#ifndef QK_K
#define QK_K 256
#endif
#endif

#include <array>
#include <ctime>
@@ -886,6 +891,7 @@ static bool kv_cache_init(
    const int64_t n_elements = n_embd*n_mem;

    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
    cache.n = 0;

    struct ggml_init_params params;
    params.mem_size = cache.buf.size;
@@ -904,6 +910,7 @@ static bool kv_cache_init(
    ggml_set_name(cache.k, "cache_k");
    ggml_set_name(cache.v, "cache_v");

    (void) n_gpu_layers;
#ifdef GGML_USE_CUBLAS
    if (n_gpu_layers > n_layer + 1) {
        ggml_cuda_assign_buffers_no_scratch(cache.v);
@@ -1253,7 +1260,7 @@ static void llama_model_load_internal(
        vram_scratch = n_batch * MB;
        ggml_cuda_set_scratch_size(vram_scratch);
        if (n_gpu_layers > 0) {
            fprintf(stderr, "%s: allocating batch_size x 1 MB = %ld MB VRAM for the scratch buffer\n",
            fprintf(stderr, "%s: allocating batch_size x 1 MB = %zd MB VRAM for the scratch buffer\n",
                    __func__, vram_scratch / MB);
        }
    }
@@ -2489,6 +2496,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
        } else {
            new_type = quantized_type;
#ifdef GGML_USE_K_QUANTS
            if (quantized_type == GGML_TYPE_Q2_K || quantized_type == GGML_TYPE_Q3_K || quantized_type == GGML_TYPE_Q4_K ||
                quantized_type == GGML_TYPE_Q5_K || quantized_type == GGML_TYPE_Q6_K) {
                int nx = tensor.ne.at(0);
                int ny = tensor.ne.at(0);
                if (nx % QK_K != 0 || ny % QK_K != 0) {
                    fprintf(stderr, "\n\n========================= Tensor sizes %d x %d are not divisible by %d\n",nx,ny,QK_K);
                    fprintf(stderr, "This is required to be able to use k-quants for now!\n");
                    fprintf(stderr, "========================================================================================\n\n");
                    throw std::runtime_error("Unsupported tensor size encountered\n");
                }
            }
            if (tensor.name == "output.weight") {
                new_type = GGML_TYPE_Q6_K;
            } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
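
The divisibility check added here reflects how the k-quant formats are laid out: weights are packed in super-blocks of QK_K elements (256 by default, as defined near the top of this file), so a tensor can only be quantized with a K-type when its dimensions are multiples of QK_K. A minimal sketch of the shape requirement, not part of the commit, assuming the intent is to check the tensor's first two dimensions:

```c
#include <stdbool.h>
#include <stdint.h>

// Sketch of the k-quants shape requirement enforced above: k-quant types pack
// weights in super-blocks of QK_K elements, so the sizes must be multiples of it.
#define QK_K 256  // default super-block size

static bool k_quants_shape_ok(int64_t nx, int64_t ny) {
    return (nx % QK_K == 0) && (ny % QK_K == 0);
}
```
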
@@ -2694,16 +2712,21 @@ struct llama_context * llama_init_from_file(
        // this allocates all Metal resources and memory buffers
        ctx->ctx_metal = ggml_metal_init();

        void *data_ptr = NULL;
        void * data_ptr = NULL;
        size_t data_size = 0;

        if (params.use_mmap) {
            data_ptr = ctx->model.mapping->addr;
            data_size= ctx->model.mapping->size;
            data_size = ctx->model.mapping->size;
        } else {
            data_ptr = ggml_get_mem_buffer(ctx->model.ctx);
            data_size= ggml_get_mem_size(ctx->model.ctx);
            data_size = ggml_get_mem_size (ctx->model.ctx);
        }

        const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);

        printf("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);

#define LLAMA_METAL_CHECK_BUF(result) \
        if (!(result)) { \
            fprintf(stderr, "%s: failed to add buffer\n", __func__); \
@@ -2711,12 +2734,13 @@ struct llama_context * llama_init_from_file(
            return NULL; \
        }

        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));

        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->model.kv_self.buf.addr, ctx->model.kv_self.buf.size));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->model.kv_self.buf.addr, ctx->model.kv_self.buf.size, 0));

        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size, 0));
#undef LLAMA_METAL_CHECK_BUF
    }
#endif
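
With the extra max_size argument, only the weight region ("data") is expected to ever exceed the device's maxBufferLength and be split into overlapping views sized to hold the largest tensor; the eval, kv and scratch regions pass 0, presumably because they are far smaller than the device limit and the split path never applies to them. A condensed sketch of the registration pattern, not part of the commit; the buffer names and the 5-argument ggml_metal_add_buffer signature come from the diff, while the wrapper function and its variables are illustrative:

```c
#include <stdbool.h>
#include <stddef.h>
#include "ggml-metal.h"

// Condensed sketch of the buffer-registration pattern above (illustrative wrapper).
static bool register_metal_buffers(struct ggml_metal_context * ctx_metal,
                                   void * data_ptr, size_t data_size, size_t max_size,
                                   void * eval_ptr, size_t eval_size,
                                   void * kv_ptr,   size_t kv_size) {
    // the model weights may exceed maxBufferLength -> pass the largest tensor size
    if (!ggml_metal_add_buffer(ctx_metal, "data", data_ptr, data_size, max_size)) return false;

    // these regions fit in a single view -> no overlap needed, pass 0
    if (!ggml_metal_add_buffer(ctx_metal, "eval", eval_ptr, eval_size, 0)) return false;
    if (!ggml_metal_add_buffer(ctx_metal, "kv",   kv_ptr,   kv_size,   0)) return false;

    return true;
}
```
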

@@ -1,6 +1,7 @@
import os
import hashlib


def sha256sum(file):
    block_size = 16 * 1024 * 1024 # 16 MB block size
    b = bytearray(block_size)

@@ -15,6 +16,7 @@ def sha256sum(file):

    return file_hash.hexdigest()


# Define the path to the llama directory (parent folder of script directory)
llama_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))