Merge branch 'ggerganov:master' into master

commit 39c4b8540d

15 changed files with 292 additions and 158 deletions

.gitignore (vendored) [29 changes]

@@ -31,28 +31,29 @@ tmp/
 models/*
 models-mnt
 
-/main
-/quantize
-/quantize-stats
-/result
-/perplexity
-/embedding
-/train-text-from-scratch
-/convert-llama2c-to-ggml
-/simple
-/benchmark-matmult
-/vdot
-/server
 /Pipfile
+/baby-llama
+/beam-search
+/benchmark-matmult
+/convert-llama2c-to-ggml
 /embd-input-test
+/embedding
 /gguf
 /gguf-llama-simple
 /libllama.so
 /llama-bench
-/baby-llama
-/beam-search
+/main
+/metal
+/perplexity
+/quantize
+/quantize-stats
+/result
 /save-load-state
+/server
+/simple
 /speculative
+/train-text-from-scratch
+/vdot
 build-info.h
 arm_neon.h
 compile_commands.json
CMakeLists.txt

@@ -36,6 +36,12 @@ endif()
 # Option list
 #
 
+if (APPLE)
+    set(LLAMA_METAL_DEFAULT ON)
+else()
+    set(LLAMA_METAL_DEFAULT OFF)
+endif()
+
 # general
 option(LLAMA_STATIC "llama: static link libraries" OFF)
 option(LLAMA_NATIVE "llama: enable -march=native flag" OFF)

@@ -76,7 +82,7 @@ option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some
 set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")
 option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
 option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
-option(LLAMA_METAL "llama: use Metal" OFF)
+option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT})
 option(LLAMA_MPI "llama: use MPI" OFF)
 option(LLAMA_K_QUANTS "llama: use k-quants" ON)
 option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF)

@@ -158,6 +164,31 @@ if (APPLE AND LLAMA_ACCELERATE)
     endif()
 endif()
 
+if (LLAMA_METAL)
+    find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
+    find_library(METAL_FRAMEWORK    Metal      REQUIRED)
+    find_library(METALKIT_FRAMEWORK MetalKit   REQUIRED)
+
+    message(STATUS "Metal framework found")
+
+    set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)
+
+    add_compile_definitions(GGML_USE_METAL)
+    #add_compile_definitions(GGML_METAL_NDEBUG)
+
+    # get full path to the file
+    #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/")
+
+    # copy ggml-metal.metal to bin directory
+    configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY)
+
+    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS}
+        ${FOUNDATION_LIBRARY}
+        ${METAL_FRAMEWORK}
+        ${METALKIT_FRAMEWORK}
+        )
+endif()
+
 if (LLAMA_BLAS)
     if (LLAMA_STATIC)
         set(BLA_STATIC ON)

@@ -293,29 +324,6 @@ if (LLAMA_CUBLAS)
     endif()
 endif()
 
-if (LLAMA_METAL)
-    find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
-    find_library(METAL_FRAMEWORK    Metal      REQUIRED)
-    find_library(METALKIT_FRAMEWORK MetalKit   REQUIRED)
-
-    set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)
-
-    add_compile_definitions(GGML_USE_METAL)
-    #add_compile_definitions(GGML_METAL_NDEBUG)
-
-    # get full path to the file
-    #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/")
-
-    # copy ggml-metal.metal to bin directory
-    configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY)
-
-    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS}
-        ${FOUNDATION_LIBRARY}
-        ${METAL_FRAMEWORK}
-        ${METALKIT_FRAMEWORK}
-        )
-endif()
-
 if (LLAMA_MPI)
     cmake_minimum_required(VERSION 3.10)
     find_package(MPI)
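With this change a plain `cmake ..` on macOS now configures the Metal backend. A quick sketch of opting out at configure time, per the `LLAMA_METAL=OFF` option the README hunk below mentions (build directory name illustrative):

    mkdir build && cd build
    cmake -DLLAMA_METAL=OFF ..        # override the new Apple default
    cmake --build . --config Release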
Makefile [78 changes]

@@ -7,6 +7,39 @@ TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-dou
 # Code coverage output files
 COV_TARGETS = *.gcno tests/*.gcno *.gcda tests/*.gcda *.gcov tests/*.gcov lcov-report gcovr-report
 
+ifndef UNAME_S
+UNAME_S := $(shell uname -s)
+endif
+
+ifndef UNAME_P
+UNAME_P := $(shell uname -p)
+endif
+
+ifndef UNAME_M
+UNAME_M := $(shell uname -m)
+endif
+
+# Mac OS + Arm can report x86_64
+# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
+ifeq ($(UNAME_S),Darwin)
+	ifndef LLAMA_NO_METAL
+		LLAMA_METAL := 1
+	endif
+
+	ifneq ($(UNAME_P),arm)
+		SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
+		ifeq ($(SYSCTL_M),1)
+			# UNAME_P := arm
+			# UNAME_M := arm64
+			warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
+		endif
+	endif
+endif
+
+ifneq '' '$(or $(filter clean,$(MAKECMDGOALS)),$(LLAMA_METAL))'
+BUILD_TARGETS += metal
+endif
+
 default: $(BUILD_TARGETS)
 
 test:

@@ -38,18 +71,6 @@ gcovr-report: coverage ## Generate gcovr report
 	mkdir -p gcovr-report
 	gcovr --root . --html --html-details --output gcovr-report/coverage.html
 
-ifndef UNAME_S
-UNAME_S := $(shell uname -s)
-endif
-
-ifndef UNAME_P
-UNAME_P := $(shell uname -p)
-endif
-
-ifndef UNAME_M
-UNAME_M := $(shell uname -m)
-endif
-
 ifdef RISCV_CROSS_COMPILE
 CC := riscv64-unknown-linux-gnu-gcc
 CXX := riscv64-unknown-linux-gnu-g++

@@ -58,19 +79,6 @@ endif
 CCV := $(shell $(CC) --version | head -n 1)
 CXXV := $(shell $(CXX) --version | head -n 1)
 
-# Mac OS + Arm can report x86_64
-# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
-ifeq ($(UNAME_S),Darwin)
-	ifneq ($(UNAME_P),arm)
-		SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
-		ifeq ($(SYSCTL_M),1)
-			# UNAME_P := arm
-			# UNAME_M := arm64
-			warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
-		endif
-	endif
-endif
-
 #
 # Compile flags
 #

@@ -231,14 +239,24 @@ endif
 endif
 
 ifndef LLAMA_NO_ACCELERATE
-	# Mac M1 - include Accelerate framework.
-	# `-framework Accelerate` works on Mac Intel as well, with negliable performance boost (as of the predict time).
+	# Mac OS - include Accelerate framework.
+	# `-framework Accelerate` works both with Apple Silicon and Mac Intel
 	ifeq ($(UNAME_S),Darwin)
 		MK_CPPFLAGS += -DGGML_USE_ACCELERATE
 		MK_LDFLAGS += -framework Accelerate
 	endif
 endif # LLAMA_NO_ACCELERATE
 
+ifdef LLAMA_METAL
+	# By default - use GPU acceleration on Mac OS
+	ifeq ($(UNAME_S),Darwin)
+		CFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG
+		CXXFLAGS += -DGGML_USE_METAL
+		LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
+		OBJS += ggml-metal.o
+	endif
+endif # LLAMA_METAL
+
 ifdef LLAMA_MPI
 	MK_CPPFLAGS += -DGGML_USE_MPI
 	MK_CFLAGS += -Wno-cast-qual

@@ -477,13 +495,9 @@ baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o common.o $(OBJS)
 beam-search: examples/beam-search/beam-search.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-speculative: examples/speculative/speculative.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+speculative: examples/speculative/speculative.cpp build-info.h ggml.o llama.o common.o grammar-parser.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-ifneq '' '$(or $(filter clean,$(MAKECMDGOALS)),$(LLAMA_METAL))'
-BUILD_TARGETS += metal
-endif
-
 ifdef LLAMA_METAL
 metal: examples/metal/metal.cpp ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
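The net effect of the Makefile hunks: on Darwin, `LLAMA_METAL` is now set before the compile flags are consumed, and the `metal` example binary joins `BUILD_TARGETS`. A usage sketch using only the variables shown in the diff above:

    make                    # on macOS: compiles with -DGGML_USE_METAL and builds the metal target
    LLAMA_NO_METAL=1 make   # opt out of the new Metal default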
README.md [26 changes]

@@ -280,29 +280,11 @@ In order to build llama.cpp you have three different options.
 
 ### Metal Build
 
-Using Metal allows the computation to be executed on the GPU for Apple devices:
-
-- Using `make`:
-
-  ```bash
-  LLAMA_METAL=1 make
-  ```
-
-- Using `CMake`:
-
-  ```bash
-  mkdir build-metal
-  cd build-metal
-  cmake -DLLAMA_METAL=ON ..
-  cmake --build . --config Release
-  ```
-
-When built with Metal support, you can enable GPU inference with the `--gpu-layers|-ngl` command-line argument.
-Any value larger than 0 will offload the computation to the GPU. For example:
-
-```bash
-./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 -ngl 1
-```
+On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
+To disable the Metal build at compile time use the `LLAMA_NO_METAL=1` flag or the `LLAMA_METAL=OFF` cmake option.
+
+When built with Metal support, you can explicitly disable GPU inference with the `--gpu-layers|-ngl 0` command-line
+argument.
 
 ### MPI Build
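For illustration, the runtime toggle the new text describes, reusing the example command the old section carried (model path illustrative):

    ./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 -ngl 0   # keep all layers on the CPU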
common/common.cpp

@@ -717,7 +717,9 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
 
     lparams.n_ctx = params.n_ctx;
     lparams.n_batch = params.n_batch;
+    if (params.n_gpu_layers != -1) {
     lparams.n_gpu_layers = params.n_gpu_layers;
+    }
     lparams.main_gpu = params.main_gpu;
     lparams.tensor_split = params.tensor_split;
     lparams.low_vram = params.low_vram;

@@ -770,7 +772,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
     {
         LOG("warming up the model with an empty run\n");
 
-        const std::vector<llama_token> tmp = { llama_token_bos(lctx), };
+        const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
         llama_eval(lctx, tmp.data(), tmp.size(), 0, params.n_threads);
         llama_reset_timings(lctx);
     }

@@ -1212,7 +1214,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "model_draft: %s # default:\n", params.model_draft.c_str());
     fprintf(stream, "mtest: %s # default: false\n", params.mem_test ? "true" : "false");
     fprintf(stream, "multiline_input: %s # default: false\n", params.multiline_input ? "true" : "false");
-    fprintf(stream, "n_gpu_layers: %d # default: 0\n", params.n_gpu_layers);
+    fprintf(stream, "n_gpu_layers: %d # default: -1\n", params.n_gpu_layers);
     fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);
     fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", params.n_probs);
     fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
common/common.h

@@ -34,7 +34,7 @@ struct gpt_params {
     int32_t n_keep = 0; // number of tokens to keep from initial prompt
     int32_t n_draft = 16; // number of tokens to draft during speculative decoding
     int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
-    int32_t n_gpu_layers = 0; // number of layers to store in VRAM
+    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
     int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
     float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
     int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
|
@ -58,7 +58,7 @@ def parse_args() -> argparse.Namespace:
|
||||||
parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
|
parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
|
||||||
parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
|
parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
|
||||||
parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)")
|
parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)")
|
||||||
parser.add_argument("ftype", type=int, choices=[0, 1], help="output format - use 0 for float32, 1 for float16", default = 1)
|
parser.add_argument("ftype", type=int, help="output format - use 0 for float32, 1 for float16", choices=[0, 1], default = 1)
|
||||||
return parser.parse_args()
|
return parser.parse_args()
|
||||||
|
|
||||||
args = parse_args()
|
args = parse_args()
|
||||||
|
|
|
examples/main/main.cpp

@@ -151,14 +151,6 @@ int main(int argc, char ** argv) {
         LOG_TEE("%s: warning: scaling RoPE frequency by %g (default 1.0)\n", __func__, params.rope_freq_scale);
     }
 
-    if (params.n_ctx > 2048) {
-        // TODO: determine the actual max context of the model (e.g. 4096 for LLaMA v2) and use that instead of 2048
-        LOG_TEE("%s: warning: base model only supports context sizes no greater than 2048 tokens (%d specified)\n", __func__, params.n_ctx);
-    } else if (params.n_ctx < 8) {
-        LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
-        params.n_ctx = 8;
-    }
-
     LOG_TEE("%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
 
     if (params.seed == LLAMA_DEFAULT_SEED) {

@@ -194,6 +186,13 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
+    if (params.n_ctx > llama_n_ctx(ctx)) {
+        LOG_TEE("%s: warning: base model only supports context sizes no greater than %d tokens (%d specified)\n", __func__, llama_n_ctx(ctx), params.n_ctx);
+    } else if (params.n_ctx < 8) {
+        LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
+        params.n_ctx = 8;
+    }
+
     // print system information
     {
         LOG_TEE("\n");
examples/perplexity/perplexity.cpp

@@ -368,7 +368,7 @@ results_perplexity perplexity(llama_context * ctx, const gpt_params & params) {
     // Example, we have a context window of 512, we will compute perplexity for each of the
     // last 256 tokens.  Then, we split the input up into context window size chunks to
     // process the entire prompt.
-    const int first = std::min(512, params.n_ctx/2);
+    const int first = params.n_ctx/2;
     process_logits(n_vocab, logits.data() + first*n_vocab, tokens.data() + start + first, params.n_ctx - 1 - first,
                    workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
     count += params.n_ctx - first - 1;

@@ -668,11 +668,6 @@ int main(int argc, char ** argv) {
         params.n_ctx += params.ppl_stride/2;
     }
 
-    if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
-                "expect poor results\n", __func__, params.n_ctx);
-    }
-
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
 
     if (params.seed == LLAMA_DEFAULT_SEED) {

@@ -698,6 +693,11 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
+    if (params.n_ctx > llama_n_ctx(ctx)) {
+        fprintf(stderr, "%s: warning: model might not support context sizes greater than %d tokens (%d specified);"
+                "expect poor results\n", __func__, llama_n_ctx(ctx), params.n_ctx);
+    }
+
     // print system information
     {
         fprintf(stderr, "\n");
examples/speculative/speculative.cpp

@@ -6,6 +6,7 @@
 
 #include "common.h"
 #include "llama.h"
+#include "grammar-parser.h"
 
 #include <cmath>
 #include <cstdio>

@@ -109,16 +110,35 @@ int main(int argc, char ** argv) {
     // used to determine end of generation
     bool has_eos = false;
 
+    // grammar stuff
+    struct llama_grammar * grammar_dft = NULL;
+    struct llama_grammar * grammar_tgt = NULL;
+
+    grammar_parser::parse_state parsed_grammar;
+
+    // if requested - load the grammar, error checking is omitted for brevity
+    if (!params.grammar.empty()) {
+        parsed_grammar = grammar_parser::parse(params.grammar.c_str());
+        // will be empty (default) if there are parse errors
+        if (parsed_grammar.rules.empty()) {
+            return 1;
+        }
+
+        std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
+        grammar_tgt = llama_grammar_init(grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
+    }
+
     const auto t_dec_start = ggml_time_us();
 
     while (true) {
         LOG("drafted: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_dft, drafted));
 
-        // sample from the drafted tokens if any
         int i_dft = 0;
         while (true) {
-            const llama_token id = llama_sample_token(ctx_tgt, NULL, NULL, params, last_tokens, candidates, i_dft);
+            // sample from the target model
+            const llama_token id = llama_sample_token(ctx_tgt, NULL, grammar_tgt, params, last_tokens, candidates, i_dft);
+
+            // remember which tokens were sampled - used for repetition penalties during sampling
             last_tokens.erase(last_tokens.begin());
             last_tokens.push_back(id);

@@ -134,8 +154,9 @@ int main(int argc, char ** argv) {
 
             ++n_predict;
 
+            // check if the draft matches the target
             if (i_dft < (int) drafted.size() && id == drafted[i_dft]) {
-                LOG("drafted token %d accepted\n", id);
+                LOG("the sampled target token matches the %dth drafted token (%d, '%s') - accepted\n", i_dft, id, token_str.c_str());
                 ++n_accept;
                 ++n_past_tgt;
                 ++n_past_dft;

@@ -145,6 +166,14 @@ int main(int argc, char ** argv) {
             }
 
             // the drafted token was rejected or we are out of drafted tokens
+
+            if (i_dft < (int) drafted.size()) {
+                LOG("the %dth drafted token (%d, '%s') does not match the sampled target token (%d, '%s') - rejected\n",
+                        i_dft, drafted[i_dft], llama_token_to_piece(ctx_dft, drafted[i_dft]).c_str(), id, token_str.c_str());
+            } else {
+                LOG("out of drafted tokens\n");
+            }
+
             llama_eval(ctx_dft, &id, 1, n_past_dft, params.n_threads);
             ++n_past_dft;

@@ -158,7 +187,16 @@ int main(int argc, char ** argv) {
             break;
         }
 
-        // sample n_draft tokens from the draft model picking the best token
+        if (grammar_tgt) {
+            if (grammar_dft) {
+                llama_grammar_free(grammar_dft);
+            }
+            grammar_dft = llama_grammar_copy(grammar_tgt);
+
+            LOG("copied target grammar to draft grammar\n");
+        }
+
+        // sample n_draft tokens from the draft model using greedy decoding
         int n_past_cur = n_past_dft;
         for (int i = 0; i < n_draft; ++i) {
             float * logits = llama_get_logits(ctx_dft);

@@ -170,25 +208,40 @@ int main(int argc, char ** argv) {
 
             llama_token_data_array cur_p = { candidates.data(), candidates.size(), false };
 
+            if (grammar_dft != NULL) {
+                llama_sample_grammar(ctx_dft, &cur_p, grammar_dft);
+            }
+
             // computes softmax and sorts the candidates
             llama_sample_softmax(ctx_dft, &cur_p);
 
             for (int i = 0; i < 3; ++i) {
-                LOG(" - draft candidate %d: %d (%.3f)\n", i, cur_p.data[i].id, cur_p.data[i].p);
+                LOG(" - draft candidate %3d: %6d (%8.3f) '%s'\n", i, cur_p.data[i].id, cur_p.data[i].p, llama_token_to_piece(ctx_dft, cur_p.data[i].id).c_str());
             }
 
-            // too low probability, stop drafting
+            // TODO: better logic?
             if (cur_p.data[0].p < 2*cur_p.data[1].p) {
+                LOG("stopping drafting, probability too low: %.3f < 2*%.3f\n", cur_p.data[0].p, cur_p.data[1].p);
                 break;
             }
 
-            drafted.push_back(cur_p.data[0].id);
+            // drafted token
+            const llama_token id = cur_p.data[0].id;
+
+            drafted.push_back(id);
             ++n_drafted;
 
-            if (i < n_draft - 1) {
+            // no need to evaluate the last drafted token, since we won't use the result
+            if (i == n_draft - 1) {
+                break;
+            }
+
             // evaluate the drafted token on the draft model
             llama_eval(ctx_dft, &drafted.back(), 1, n_past_cur, params.n_threads);
             ++n_past_cur;
+
+            if (grammar_dft != NULL) {
+                llama_grammar_accept_token(ctx_dft, grammar_dft, id);
             }
         }

@@ -196,6 +249,7 @@ int main(int argc, char ** argv) {
         llama_eval(ctx_tgt, drafted.data(), drafted.size(), n_past_tgt, params.n_threads);
         ++n_past_tgt;
 
+        // the first token is always proposed by the traget model before the speculation loop
         drafted.erase(drafted.begin());
     }

@@ -226,6 +280,10 @@ int main(int argc, char ** argv) {
     llama_free(ctx_dft);
     llama_free_model(model_dft);
 
+    if (grammar_dft != NULL) {
+        llama_grammar_free(grammar_dft);
+        llama_grammar_free(grammar_tgt);
+    }
+
     llama_backend_free();
 
     fprintf(stderr, "\n\n");
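A plausible end-to-end invocation of the grammar-constrained speculative example; the `--grammar-file` and draft-model flags come from the common argument parser rather than this diff, so treat the exact spellings and model paths as assumptions:

    ./speculative -m models/70B/ggml-model-q4_0.gguf -md models/7B/ggml-model-q4_0.gguf \
        -p "JSON list of US state names: " -n 256 --grammar-file grammars/json_arr.gbnf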
ggml-opencl.cpp

@@ -1334,7 +1334,7 @@ void ggml_cl_free_data(const struct ggml_tensor* tensor) {
         return;
     }
 
-    cl_mem mem = (cl_mem)tensor->data;
+    cl_mem mem = (cl_mem)tensor->extra;
     clReleaseMemObject(mem);
 }
 

@@ -1393,7 +1393,7 @@ static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1,
     size_t d_size;
 
     cl_mem d_X = ggml_cl_pool_malloc(ne0 * sizeof(float), &x_size); // src0
-    cl_mem d_Y = (cl_mem) src1->data; // src1 is already on device, broadcasted.
+    cl_mem d_Y = (cl_mem) src1->extra; // src1 is already on device, broadcasted.
     cl_mem d_D = ggml_cl_pool_malloc(ne0 * sizeof(float), &d_size); // dst
 

@@ -1491,7 +1491,7 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
     size_t d_size;
     cl_mem d_X;
     if (src0->backend == GGML_BACKEND_GPU) { // NOLINT
-        d_X = (cl_mem) src0->data;
+        d_X = (cl_mem) src0->extra;
     } else {
         d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size);
     }

@@ -1567,7 +1567,7 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
     size_t d_size;
     cl_mem d_X;
     if (src0->backend == GGML_BACKEND_GPU) { // NOLINT
-        d_X = (cl_mem) src0->data;
+        d_X = (cl_mem) src0->extra;
     } else {
         d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size);
     }

@@ -1697,7 +1697,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             events.emplace_back();
             CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++));
         } else if (src0->backend == GGML_BACKEND_GPU) {
-            d_Q = (cl_mem) src0->data;
+            d_Q = (cl_mem) src0->extra;
         } else {
             GGML_ASSERT(false);
         }

@@ -1860,6 +1860,6 @@ void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) {
 
     CL_CHECK(clFinish(queue));
 
-    tensor->data = dst;
+    tensor->extra = dst;
     GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
 }
grammars/json_arr.gbnf [34 lines, new file]

@@ -0,0 +1,34 @@
+# This is the same as json.gbnf but we restrict whitespaces at the end of the root array
+# Useful for generating JSON arrays
+
+root   ::= arr
+value  ::= object | array | string | number | ("true" | "false" | "null") ws
+
+arr  ::=
+  "[\n" ws (
+            value
+    (",\n" ws value)*
+  )? "]"
+
+object ::=
+  "{" ws (
+            string ":" ws value
+    ("," ws string ":" ws value)*
+  )? "}" ws
+
+array  ::=
+  "[" ws (
+            value
+    ("," ws value)*
+  )? "]" ws
+
+string ::=
+  "\"" (
+    [^"\\] |
+    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
+  )* "\"" ws
+
+number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
+
+# Optional space: by convention, applied in this grammar after literal chars when allowed
+ws ::= ([ \t\n] ws)?
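As a usage sketch, mirroring how the other grammars in this directory are driven (model path illustrative):

    ./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 \
        --grammar-file grammars/json_arr.gbnf -p "Three largest cities as a JSON array: "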
k_quants.c

@@ -83,7 +83,7 @@ static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t *
         float ax = fabsf(x[i]);
         if (ax > amax) { amax = ax; max = x[i]; }
     }
-    if (!amax) { // all zero
+    if (amax < 1e-30f) { // all zero
         for (int i = 0; i < n; ++i) {
             L[i] = 0;
         }

@@ -1086,6 +1086,12 @@ void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict
 
         }
 
+        if (!max_abs_scale) {
+            memset(&y[i], 0, sizeof(block_q6_K));
+            y[i].d = ggml_fp32_to_fp16(0.f);
+            continue;
+        }
+
         float iscale = -128.f/max_scale;
         y[i].d = ggml_fp32_to_fp16(1/iscale);
         for (int ib = 0; ib < QK_K/16; ++ib) {
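Why `amax < 1e-30f` rather than `!amax`: a tiny but nonzero maximum slips past an exact-zero test, and the reciprocal scale then overflows. A minimal C++ sketch of that failure mode, with illustrative values (not code from the commit):

    #include <cmath>
    #include <cstdio>

    int main() {
        float max    = -1e-38f;       // tiny but nonzero: `!amax` would not catch it
        float iscale = -128.f / max;  // ~1.28e40 exceeds FLT_MAX, so this becomes +inf
        std::printf("iscale = %g, finite = %d\n", iscale, std::isfinite(iscale));
    }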
llama.cpp [34 changes]

@@ -3402,7 +3402,12 @@ static bool llama_eval_internal(
 
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
-    n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
+    // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
+    //       we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
+    //       with the BLAS calls. need a better solution
+    if (N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
+        n_threads = std::min(4, n_threads);
+    }
 
     struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
     struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];

@@ -4310,6 +4315,25 @@ void llama_grammar_free(struct llama_grammar * grammar) {
     delete grammar;
 }
 
+struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
+    llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
+
+    // redirect elements in stacks to point to new rules
+    for (size_t is = 0; is < result->stacks.size(); is++) {
+        for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
+            for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
+                for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
+                    if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
+                        result->stacks[is][ie] = &result->rules[ir0][ir1];
+                    }
+                }
+            }
+        }
+    }
+
+    return result;
+}
+
 //
 // sampling
 //

@@ -5800,7 +5824,7 @@ struct llama_context_params llama_context_default_params() {
         /*.seed                        =*/ LLAMA_DEFAULT_SEED,
         /*.n_ctx                       =*/ 512,
         /*.n_batch                     =*/ 512,
-        /*.gpu_layers                  =*/ 0,
+        /*.n_gpu_layers                =*/ 0,
         /*.main_gpu                    =*/ 0,
         /*.tensor_split                =*/ nullptr,
         /*.rope_freq_base              =*/ 10000.0f,

@@ -5817,6 +5841,10 @@ struct llama_context_params llama_context_default_params() {
         /*.embedding                   =*/ false,
     };
 
+#ifdef GGML_USE_METAL
+    result.n_gpu_layers = 1;
+#endif
+
     return result;
 }

@@ -6009,7 +6037,6 @@ struct llama_context * llama_new_context_with_model(
             }
 #endif
         }
-    }
 
 #ifdef GGML_USE_METAL
     if (params.n_gpu_layers > 0) {

@@ -6046,6 +6073,7 @@ struct llama_context * llama_new_context_with_model(
 #undef LLAMA_METAL_CHECK_BUF
     }
 #endif
+    }
 
 #ifdef GGML_USE_MPI
     ctx->ctx_mpi = ggml_mpi_init();
llama.h [2 changes]

@@ -410,6 +410,8 @@ extern "C" {
 
     LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
 
+    LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
+
     //
     // Sampling functions
     //
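A minimal C++ sketch of the lifecycle the new `llama_grammar_copy` enables, as the speculative example above uses it; the setup names (`rules`, `root_id`, `n_rounds`, `ctx_dft`) are assumptions for illustration, not part of the API:

    // target grammar stays authoritative; the draft side works on a disposable copy
    struct llama_grammar * grammar_tgt = llama_grammar_init(rules.data(), rules.size(), root_id);
    struct llama_grammar * grammar_dft = NULL;

    for (int round = 0; round < n_rounds; ++round) {
        if (grammar_dft) {
            llama_grammar_free(grammar_dft);           // discard the previous round's copy
        }
        grammar_dft = llama_grammar_copy(grammar_tgt); // deep copy; stack pointers re-targeted

        // ... constrain draft sampling with llama_sample_grammar(ctx_dft, &cur_p, grammar_dft);
        // ... advance it with llama_grammar_accept_token(ctx_dft, grammar_dft, id);
    }

    llama_grammar_free(grammar_dft);
    llama_grammar_free(grammar_tgt);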