merge conflict

commit 63cef16956

5 changed files with 146 additions and 96 deletions

Makefile (146 changes)
@@ -67,21 +67,21 @@ OPT = -Ofast
 else
 OPT = -O3
 endif
-CFLAGS = -I. $(OPT) -std=c11 -fPIC
-CXXFLAGS = -I. -I./common $(OPT) -std=c++11 -fPIC
-LDFLAGS =
+MK_CPPFLAGS = -I. -Icommon
+MK_CFLAGS = $(CPPFLAGS) $(OPT) -std=c11 -fPIC
+MK_CXXFLAGS = $(CPPFLAGS) $(OPT) -std=c++11 -fPIC
+MK_LDFLAGS =
 
 ifdef LLAMA_DEBUG
-CFLAGS += -O0 -g
-CXXFLAGS += -O0 -g
-LDFLAGS += -g
+MK_CFLAGS += -O0 -g
+MK_CXXFLAGS += -O0 -g
+MK_LDFLAGS += -g
 else
-CFLAGS += -DNDEBUG
-CXXFLAGS += -DNDEBUG
+MK_CPPFLAGS += -DNDEBUG
 endif
 
 ifdef LLAMA_SERVER_VERBOSE
-CXXFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
+MK_CPPFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
 endif
 
 ifdef LLAMA_DISABLE_LOGS
@@ -90,9 +90,9 @@ ifdef LLAMA_DISABLE_LOGS
 endif # LLAMA_DISABLE_LOGS
 
 # warnings
-CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith \
+MK_CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith \
 			-Wmissing-prototypes -Werror=implicit-int -Wno-unused-function
-CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
+MK_CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
 
 ifeq '' '$(findstring clang++,$(CXX))'
 # g++ only
@@ -101,29 +101,9 @@ endif
 
 # OS specific
 # TODO: support Windows
-ifeq ($(UNAME_S),Linux)
-CFLAGS += -pthread
-CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),Darwin)
-CFLAGS += -pthread
-CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),FreeBSD)
-CFLAGS += -pthread
-CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),NetBSD)
-CFLAGS += -pthread
-CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),OpenBSD)
-CFLAGS += -pthread
-CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),Haiku)
-CFLAGS += -pthread
-CXXFLAGS += -pthread
-endif
+ifneq '' '$(filter $(UNAME_S),Linux Darwin FreeBSD NetBSD OpenBSD Haiku)'
+MK_CFLAGS += -pthread
+MK_CXXFLAGS += -pthread
+endif
 
 # detect Windows
@@ -149,12 +129,11 @@ ifeq ($(_WIN32),1)
 endif
 
 ifdef LLAMA_GPROF
-CFLAGS += -pg
-CXXFLAGS += -pg
+MK_CFLAGS += -pg
+MK_CXXFLAGS += -pg
 endif
 ifdef LLAMA_PERF
-CFLAGS += -DGGML_PERF
-CXXFLAGS += -DGGML_PERF
+MK_CPPFLAGS += -DGGML_PERF
 endif
 
 # Architecture specific
@@ -165,16 +144,16 @@ ifndef RISCV
 
 ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
 # Use all CPU extensions that are available:
-CFLAGS += -march=native -mtune=native
-CXXFLAGS += -march=native -mtune=native
+MK_CFLAGS += -march=native -mtune=native
+MK_CXXFLAGS += -march=native -mtune=native
 
 # Usage AVX-only
-#CFLAGS += -mfma -mf16c -mavx
-#CXXFLAGS += -mfma -mf16c -mavx
+#MK_CFLAGS += -mfma -mf16c -mavx
+#MK_CXXFLAGS += -mfma -mf16c -mavx
 
 # Usage SSSE3-only (Not is SSE3!)
-#CFLAGS += -mssse3
-#CXXFLAGS += -mssse3
+#MK_CFLAGS += -mssse3
+#MK_CXXFLAGS += -mssse3
 endif
 
 # The stack is only 16-byte aligned on Windows, so don't let gcc emit aligned moves.
@@ -188,34 +167,33 @@ endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
 # Apple M1, M2, etc.
 # Raspberry Pi 3, 4, Zero 2 (64-bit)
-CFLAGS += -mcpu=native
-CXXFLAGS += -mcpu=native
+MK_CFLAGS += -mcpu=native
+MK_CXXFLAGS += -mcpu=native
 endif
 
 ifneq ($(filter armv6%,$(UNAME_M)),)
 # Raspberry Pi 1, Zero
-CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
+MK_CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
+MK_CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
 endif
 
 ifneq ($(filter armv7%,$(UNAME_M)),)
 # Raspberry Pi 2
-CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
+MK_CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
+MK_CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
 endif
 
 ifneq ($(filter armv8%,$(UNAME_M)),)
 # Raspberry Pi 3, 4, Zero 2 (32-bit)
-CFLAGS += -mfp16-format=ieee -mno-unaligned-access
+MK_CFLAGS += -mfp16-format=ieee -mno-unaligned-access
+MK_CXXFLAGS += -mfp16-format=ieee -mno-unaligned-access
 endif
 
 ifneq ($(filter ppc64%,$(UNAME_M)),)
 POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
 ifneq (,$(findstring POWER9,$(POWER9_M)))
-CFLAGS += -mcpu=power9
-CXXFLAGS += -mcpu=power9
+MK_CFLAGS += -mcpu=power9
+MK_CXXFLAGS += -mcpu=power9
-endif
-# Require c++23's std::byteswap for big-endian support.
-ifeq ($(UNAME_M),ppc64)
-CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
 endif
 endif
 
@@ -225,12 +203,10 @@ else
 endif
 
 ifndef LLAMA_NO_K_QUANTS
-CFLAGS += -DGGML_USE_K_QUANTS
-CXXFLAGS += -DGGML_USE_K_QUANTS
+MK_CPPFLAGS += -DGGML_USE_K_QUANTS
 OBJS += k_quants.o
 ifdef LLAMA_QKK_64
-CFLAGS += -DGGML_QKK_64
-CXXFLAGS += -DGGML_QKK_64
+MK_CPPFLAGS += -DGGML_QKK_64
 endif
 endif
 
@@ -238,31 +214,32 @@ ifndef LLAMA_NO_ACCELERATE
 # Mac M1 - include Accelerate framework.
 # `-framework Accelerate` works on Mac Intel as well, with negliable performance boost (as of the predict time).
 ifeq ($(UNAME_S),Darwin)
-CFLAGS += -DGGML_USE_ACCELERATE
-LDFLAGS += -framework Accelerate
+MK_CPPFLAGS += -DGGML_USE_ACCELERATE
+MK_LDFLAGS += -framework Accelerate
 endif
 endif # LLAMA_NO_ACCELERATE
 
 ifdef LLAMA_MPI
-CFLAGS += -DGGML_USE_MPI -Wno-cast-qual
-CXXFLAGS += -DGGML_USE_MPI -Wno-cast-qual
+MK_CPPFLAGS += -DGGML_USE_MPI
+MK_CFLAGS += -Wno-cast-qual
+MK_CXXFLAGS += -Wno-cast-qual
 OBJS += ggml-mpi.o
 endif # LLAMA_MPI
 
 ifdef LLAMA_OPENBLAS
-CFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags openblas)
-LDFLAGS += $(shell pkg-config --libs openblas)
+MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas)
+MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas)
+MK_LDFLAGS += $(shell pkg-config --libs openblas)
 endif # LLAMA_OPENBLAS
 
 ifdef LLAMA_BLIS
-CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
-LDFLAGS += -lblis -L/usr/local/lib
+MK_CPPFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
+MK_LDFLAGS += -lblis -L/usr/local/lib
 endif # LLAMA_BLIS
 
 ifdef LLAMA_CUBLAS
-CFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
-CXXFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
-LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
+MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
+MK_LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
 OBJS += ggml-cuda.o
 NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math
 ifdef LLAMA_CUDA_NVCC
@@ -313,14 +290,15 @@ endif # LLAMA_CUBLAS
 
 ifdef LLAMA_CLBLAST
 
-CFLAGS += -DGGML_USE_CLBLAST $(shell pkg-config --cflags clblast OpenCL)
-CXXFLAGS += -DGGML_USE_CLBLAST $(shell pkg-config --cflags clblast OpenCL)
+MK_CPPFLAGS += -DGGML_USE_CLBLAST $(shell pkg-config --cflags-only-I clblast OpenCL)
+MK_CFLAGS += $(shell pkg-config --cflags-only-other clblast OpenCL)
+MK_CXXFLAGS += $(shell pkg-config --cflags-only-other clblast OpenCL)
 
 # Mac provides OpenCL as a framework
 ifeq ($(UNAME_S),Darwin)
-LDFLAGS += -lclblast -framework OpenCL
+MK_LDFLAGS += -lclblast -framework OpenCL
 else
-LDFLAGS += $(shell pkg-config --libs clblast OpenCL)
+MK_LDFLAGS += $(shell pkg-config --libs clblast OpenCL)
 endif
 OBJS += ggml-opencl.o
 
@@ -335,10 +313,9 @@ ifdef LLAMA_HIPBLAS
 LLAMA_CUDA_DMMV_X ?= 32
 LLAMA_CUDA_MMV_Y ?= 1
 LLAMA_CUDA_KQUANTS_ITER ?= 2
-CFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
-CXXFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
-LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
-LDFLAGS += -lhipblas -lamdhip64 -lrocblas
+MK_CPPFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
+MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
+MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas
 HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS))
 HIPFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
 HIPFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y)
@@ -353,9 +330,8 @@ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
 endif # LLAMA_HIPBLAS
 
 ifdef LLAMA_METAL
-CFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG
-CXXFLAGS += -DGGML_USE_METAL
-LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
+MK_CPPFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG
+MK_LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
 OBJS += ggml-metal.o
 endif # LLAMA_METAL
 
@@ -369,11 +345,17 @@ ggml-mpi.o: ggml-mpi.c ggml-mpi.h
 	$(CC) $(CFLAGS) -c $< -o $@
 endif # LLAMA_MPI
 
-ifdef LLAMA_NO_K_QUANTS
+ifndef LLAMA_NO_K_QUANTS
 k_quants.o: k_quants.c k_quants.h
 	$(CC) $(CFLAGS) -c $< -o $@
 endif # LLAMA_NO_K_QUANTS
 
+# combine build flags with cmdline overrides
+override CPPFLAGS := $(MK_CPPFLAGS) $(CPPFLAGS)
+override CFLAGS := $(MK_CFLAGS) $(CFLAGS)
+override CXXFLAGS := $(MK_CXXFLAGS) $(CXXFLAGS)
+override LDFLAGS := $(MK_LDFLAGS) $(LDFLAGS)
+
 #
 # Print build information
 #

README.md

@@ -120,6 +120,7 @@ as the main playground for developing new features for the [ggml](https://github
 
 - [nat/openplayground](https://github.com/nat/openplayground)
 - [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui)
+- [withcatai/catai](https://github.com/withcatai/catai)
 
 ---
 

convert.py (30 changes)

@@ -323,15 +323,27 @@ class BpeVocab:
         self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read())
         added_tokens: dict[str, int]
         if fname_added_tokens is not None:
+            # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
             added_tokens = json.load(open(fname_added_tokens, encoding="utf-8"))
         else:
+            # Fall back to trying to find the added tokens in tokenizer.json
+            tokenizer_json_file = fname_tokenizer.parent / 'tokenizer.json'
+            if not tokenizer_json_file.is_file():
                 added_tokens = {}
+            else:
+                tokenizer_json = json.load(open(tokenizer_json_file, encoding="utf-8"))
+                added_tokens = dict(
+                    (item['content'], item['id'])
+                    for item in tokenizer_json.get('added_tokens', [])
+                    # Added tokens here can be duplicates of the main vocabulary.
+                    if item['content'] not in self.bpe_tokenizer )
 
         vocab_size: int = len(self.bpe_tokenizer)
         expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
         actual_ids = sorted(added_tokens.values())
         if expected_ids != actual_ids:
-            raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}")
+            expected_end_id = vocab_size + len(actual_ids) - 1
+            raise Exception(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range {vocab_size} - {expected_end_id}; got {actual_ids}")
 
         items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
         self.added_tokens_list = [text for (text, idx) in items]
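
Read on its own, the fallback added above only runs when added_tokens.json is missing: it pulls the added_tokens array out of tokenizer.json and keeps only entries that are not already in the main BPE vocab, then reuses the existing sequential-ID check. A minimal standalone sketch of that filtering, using invented sample data (the dicts below are hypothetical, not taken from the commit):

# Standalone paraphrase of the fallback above; all data here is made up.
bpe_vocab = {"hello": 0, "world": 1, "<pad>": 2}   # stands in for self.bpe_tokenizer
tokenizer_json = {
    "added_tokens": [
        {"id": 2, "content": "<pad>"},      # already in the main vocab -> filtered out
        {"id": 3, "content": "<custom>"},   # genuinely new -> kept
    ]
}

added_tokens = dict(
    (item["content"], item["id"])
    for item in tokenizer_json.get("added_tokens", [])
    if item["content"] not in bpe_vocab
)
print(added_tokens)  # {'<custom>': 3}

# The existing sanity check then requires the kept IDs to continue the main vocab.
vocab_size = len(bpe_vocab)
assert sorted(added_tokens.values()) == list(range(vocab_size, vocab_size + len(added_tokens)))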
@@ -345,10 +357,22 @@ class BpeVocab:
         from transformers.models.gpt2 import tokenization_gpt2  # type: ignore[import]
         byte_encoder = tokenization_gpt2.bytes_to_unicode()
         byte_decoder = {v: k for k, v in byte_encoder.items()}
+        score = 0.0
         for i, item in enumerate(tokenizer):
             text: bytes = item.encode("utf-8")
-            score: float = -i
-            yield text, score, gguf.TokenType.USER_DEFINED
+            # FIXME: These shouldn't be hardcoded, but it's probably better than the current behavior?
+            if i <= 258 and text.startswith(b'<') and text.endswith(b'>'):
+                if i == 0 and text == b'<unk>':
+                    toktype = gguf.TokenType.UNKNOWN
+                elif i == 1 or i == 2:
+                    toktype = gguf.TokenType.CONTROL
+                elif i >= 3 and text.startswith(b'<0x'):
+                    toktype = gguf.TokenType.BYTE
+                else:
+                    toktype = gguf.TokenType.NORMAL
+            else:
+                toktype = gguf.TokenType.NORMAL
+            yield text, score, toktype
 
     def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         for text in self.added_tokens_list:
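
Taken out of the diff, the new token typing in the hunk above is a small rule set over the first few special tokens of a SentencePiece-style vocab. A hedged standalone sketch of those rules (the sample tokens are invented, and gguf.TokenType members are replaced with plain strings so the snippet runs without the gguf package):

def classify(i: int, text: bytes) -> str:
    # Mirrors the heuristic above: low-id tokens that look like "<...>" get special types.
    if i <= 258 and text.startswith(b'<') and text.endswith(b'>'):
        if i == 0 and text == b'<unk>':
            return "UNKNOWN"
        if i in (1, 2):
            return "CONTROL"                  # conventionally <s> and </s>
        if i >= 3 and text.startswith(b'<0x'):
            return "BYTE"                     # byte-fallback tokens such as <0x0A>
        return "NORMAL"
    return "NORMAL"

sample = [b'<unk>', b'<s>', b'</s>', b'<0x00>', b'hello']   # invented example vocab
print([classify(i, t) for i, t in enumerate(sample)])
# ['UNKNOWN', 'CONTROL', 'CONTROL', 'BYTE', 'NORMAL']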

@@ -660,9 +660,10 @@ bool gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt2
 ggml_tensor * gpt_neox_ff(
     const gpt_neox_block &block,
     ggml_context * ctx0,
-    ggml_tensor * inp) {
+    ggml_tensor * inp,
+    const gpt_neox_hparams &hparams) {
 
-    ggml_tensor * cur = ggml_norm(ctx0, inp);
+    ggml_tensor * cur = ggml_norm(ctx0, inp, hparams.norm_eps);
 
     cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, block.ln_2_g, cur), cur), ggml_repeat(ctx0, block.ln_2_b, cur));
     cur = ggml_mul_mat(ctx0, block.c_mlp_fc_w, cur);
@@ -753,7 +754,7 @@ bool gpt_neox_eval(
         // self-attention
         {
             {
-                cur = ggml_norm(ctx0, inpL);
+                cur = ggml_norm(ctx0, inpL, hparams.norm_eps);
 
                 cur = ggml_add(ctx0,
                     ggml_mul(ctx0, ggml_repeat(ctx0, model.blocks[il].ln_1_g, cur), cur),
@@ -844,7 +845,7 @@ bool gpt_neox_eval(
         if (hparams.par_res == 0) {
             struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpL);
 
-            cur = gpt_neox_ff(model.blocks[il], ctx0, inpFF);
+            cur = gpt_neox_ff(model.blocks[il], ctx0, inpFF, hparams);
 
             // input for next layer
             inpL = ggml_add(ctx0, cur, inpFF);
@@ -853,7 +854,7 @@ bool gpt_neox_eval(
 
             // this is independent of the self-attention result, so it could be done in parallel to the self-attention
             // note here we pass inpL instead of cur
-            cur = gpt_neox_ff(model.blocks[il], ctx0, inpL);
+            cur = gpt_neox_ff(model.blocks[il], ctx0, inpL, hparams);
 
             // layer input + FF
             cur = ggml_add(ctx0, cur, inpFF);
@@ -867,7 +868,7 @@ bool gpt_neox_eval(
 
     // norm
     {
-        inpL = ggml_norm(ctx0, inpL);
+        inpL = ggml_norm(ctx0, inpL, hparams.norm_eps);
 
         // inpL = ln_f_g*inpL + ln_f_b
         inpL = ggml_add(ctx0,

llama.cpp (42 changes)

@@ -325,6 +325,44 @@ static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES =
             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_GPT2,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+        },
+    },
+    {
+        LLM_ARCH_GPTJ,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+        },
+    },
+    {
+        LLM_ARCH_GPTNEOX,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+        },
+    },
+    {
+        LLM_ARCH_MPT,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+        },
+    },
+    {
+        LLM_ARCH_UNKNOWN,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+        },
+    },
 };
 
 static llm_arch llm_arch_from_string(const std::string & name) {
@@ -1605,10 +1643,14 @@ static void llm_load_hparams(
 
         GGUF_GET_KEY(ctx, hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ROPE_DIMENSION_COUNT));
 
+        if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
         if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
             throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
         }
     }
+        // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
+        // gpt-j n_rot = rotary_dim
+    }
 
     // arch-specific KVs
     switch (model.arch) {