Merge branch 'master' into ik/metal_pp

Commit 0c17b08cc7: 35 changed files with 437 additions and 288 deletions

@@ -3,6 +3,7 @@ Checks: >
bugprone-*,
-bugprone-easily-swappable-parameters,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-misplaced-widening-cast,
-bugprone-narrowing-conversions,
readability-*,
-readability-avoid-unconditional-preprocessor-if,

@@ -15,4 +16,8 @@ Checks: >
-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
performance-*,
portability-*,
misc-*,
-misc-const-correctness,
-misc-non-private-member-variables-in-classes,
-misc-no-recursion,
FormatStyle: none

@@ -12,7 +12,7 @@ FROM ${BASE_CUDA_DEV_CONTAINER} as build
ARG CUDA_DOCKER_ARCH=all

RUN apt-get update && \
apt-get install -y build-essential python3 python3-pip
apt-get install -y build-essential python3 python3-pip git

COPY requirements.txt requirements.txt

@@ -12,7 +12,7 @@ FROM ${BASE_CUDA_DEV_CONTAINER} as build
ARG CUDA_DOCKER_ARCH=all

RUN apt-get update && \
apt-get install -y build-essential
apt-get install -y build-essential git

WORKDIR /app

@@ -426,7 +426,7 @@ if (LLAMA_ALL_WARNINGS)
)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# g++ only
set(cxx_flags ${cxx_flags} -Wno-format-truncation)
set(cxx_flags ${cxx_flags} -Wno-format-truncation -Wno-array-bounds)
endif()
else()
# todo : msvc

@@ -551,12 +551,64 @@ else()
message(STATUS "Unknown architecture")
endif()

#
# POSIX conformance
#

# clock_gettime came in POSIX.1b (1993)
# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional
# posix_memalign came in POSIX.1-2001 / SUSv3
# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)
add_compile_definitions(_XOPEN_SOURCE=600)

# Somehow in OpenBSD whenever POSIX conformance is specified
# some string functions rely on locale_t availability,
# which was introduced in POSIX.1-2008, forcing us to go higher
if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
remove_definitions(-D_XOPEN_SOURCE=600)
add_compile_definitions(_XOPEN_SOURCE=700)
endif()

# Data types, macros and functions related to controlling CPU affinity and
# some memory allocation are available on Linux through GNU extensions in libc
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
add_compile_definitions(_GNU_SOURCE)
endif()

# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1,
# and on macOS its availability depends on enabling Darwin extensions
# similarly on DragonFly, enabling BSD extensions is necessary
if (CMAKE_SYSTEM_NAME MATCHES "Darwin")
add_compile_definitions(_DARWIN_C_SOURCE)
endif()
if (CMAKE_SYSTEM_NAME MATCHES "DragonFly")
add_compile_definitions(_DARWIN_C_SOURCE)
endif()

# alloca is a non-standard interface that is not visible on BSDs when
# POSIX conformance is specified, but not all of them provide a clean way
# to enable it in such cases
if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
add_compile_definitions(__BSD_VISIBLE)
endif()
if (CMAKE_SYSTEM_NAME MATCHES "NetBSD")
add_compile_definitions(_NETBSD_SOURCE)
endif()
if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
add_compile_definitions(_BSD_SOURCE)
endif()

#
# libraries
#

# ggml

if (GGML_USE_CPU_HBM)
add_definitions(-DGGML_USE_CPU_HBM)
find_library(memkind memkind REQUIRED)
endif()

add_library(ggml OBJECT
ggml.c
ggml.h

@@ -572,6 +624,9 @@ add_library(ggml OBJECT
target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
target_compile_features(ggml PUBLIC c_std_11) # don't bump
target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
if (GGML_USE_CPU_HBM)
target_link_libraries(ggml PUBLIC memkind)
endif()

add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
if (BUILD_SHARED_LIBS)

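The POSIX-conformance block above only sets feature-test macros; a minimal C sketch, not taken from the repository, of the interfaces those comments refer to (clock_gettime/CLOCK_MONOTONIC, posix_memalign, M_PI) becoming visible once _XOPEN_SOURCE is defined to 600 before any system header:

    /* illustrative sketch, not part of the commit */
    #define _XOPEN_SOURCE 600

    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);        // POSIX.1-2001 optional clock

        void * buf = NULL;
        if (posix_memalign(&buf, 64, 1024) != 0) {  // POSIX.1-2001 aligned allocation
            return 1;
        }

        // M_PI is an XSI extension, exposed by the same macro
        printf("t = %ld.%09ld s, pi = %f\n", (long) ts.tv_sec, ts.tv_nsec, M_PI);
        free(buf);
        return 0;
    }
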
Makefile (82 changed lines)

@@ -42,9 +42,9 @@ endif
default: $(BUILD_TARGETS)

test:
@echo "Running tests..."
@for test_target in $(TEST_TARGETS); do \
test: $(TEST_TARGETS)
@failures=0; \
for test_target in $(TEST_TARGETS); do \
if [ "$$test_target" = "tests/test-tokenizer-0-llama" ]; then \
./$$test_target $(CURDIR)/models/ggml-vocab-llama.gguf; \
elif [ "$$test_target" = "tests/test-tokenizer-0-falcon" ]; then \

@@ -52,10 +52,21 @@ test:
elif [ "$$test_target" = "tests/test-tokenizer-1" ]; then \
continue; \
else \
echo "Running test $$test_target..."; \
./$$test_target; \
fi; \
done
@echo "All tests have been run."
if [ $$? -ne 0 ]; then \
printf 'Test $$test_target FAILED!\n\n' $$test_target; \
failures=$$(( failures + 1 )); \
else \
printf 'Test %s passed.\n\n' $$test_target; \
fi; \
done; \
if [ $$failures -gt 0 ]; then \
printf '\n%s tests failed.\n' $$failures; \
exit 1; \
fi
@echo 'All tests passed.'

all: $(BUILD_TARGETS) $(TEST_TARGETS)

@@ -91,10 +102,60 @@ else
OPT = -O3
endif
MK_CPPFLAGS = -I. -Icommon
MK_CFLAGS = $(CPPFLAGS) $(OPT) -std=c11 -fPIC
MK_CXXFLAGS = $(CPPFLAGS) $(OPT) -std=c++11 -fPIC
MK_CFLAGS = $(OPT) -std=c11 -fPIC
MK_CXXFLAGS = $(OPT) -std=c++11 -fPIC
MK_LDFLAGS =

# clock_gettime came in POSIX.1b (1993)
# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional
# posix_memalign came in POSIX.1-2001 / SUSv3
# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)
MK_CFLAGS += -D_XOPEN_SOURCE=600
MK_CXXFLAGS += -D_XOPEN_SOURCE=600

# Somehow in OpenBSD whenever POSIX conformance is specified
# some string functions rely on locale_t availability,
# which was introduced in POSIX.1-2008, forcing us to go higher
ifeq ($(UNAME_S),OpenBSD)
MK_CFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700
MK_CXXFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700
endif

# Data types, macros and functions related to controlling CPU affinity and
# some memory allocation are available on Linux through GNU extensions in libc
ifeq ($(UNAME_S),Linux)
MK_CFLAGS += -D_GNU_SOURCE
MK_CXXFLAGS += -D_GNU_SOURCE
endif

# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1,
# and on macOS its availability depends on enabling Darwin extensions
# similarly on DragonFly, enabling BSD extensions is necessary
ifeq ($(UNAME_S),Darwin)
MK_CFLAGS += -D_DARWIN_C_SOURCE
MK_CXXFLAGS += -D_DARWIN_C_SOURCE
endif
ifeq ($(UNAME_S),DragonFly)
MK_CFLAGS += -D__BSD_VISIBLE
MK_CXXFLAGS += -D__BSD_VISIBLE
endif

# alloca is a non-standard interface that is not visible on BSDs when
# POSIX conformance is specified, but not all of them provide a clean way
# to enable it in such cases
ifeq ($(UNAME_S),FreeBSD)
MK_CFLAGS += -D__BSD_VISIBLE
MK_CXXFLAGS += -D__BSD_VISIBLE
endif
ifeq ($(UNAME_S),NetBSD)
MK_CFLAGS += -D_NETBSD_SOURCE
MK_CXXFLAGS += -D_NETBSD_SOURCE
endif
ifeq ($(UNAME_S),OpenBSD)
MK_CFLAGS += -D_BSD_SOURCE
MK_CXXFLAGS += -D_BSD_SOURCE
endif

ifdef LLAMA_DEBUG
MK_CFLAGS += -O0 -g
MK_CXXFLAGS += -O0 -g

@@ -123,7 +184,7 @@ MK_CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-m
ifeq '' '$(findstring clang++,$(CXX))'
# g++ only
MK_CXXFLAGS += -Wno-format-truncation
MK_CXXFLAGS += -Wno-format-truncation -Wno-array-bounds
endif

# OS specific

@@ -381,9 +442,8 @@ k_quants.o: k_quants.c k_quants.h
endif # LLAMA_NO_K_QUANTS

# combine build flags with cmdline overrides
override CPPFLAGS := $(MK_CPPFLAGS) $(CPPFLAGS)
override CFLAGS := $(MK_CFLAGS) $(CFLAGS)
override CXXFLAGS := $(MK_CXXFLAGS) $(CXXFLAGS)
override CFLAGS := $(MK_CPPFLAGS) $(CPPFLAGS) $(MK_CFLAGS) $(CFLAGS)
override CXXFLAGS := $(MK_CPPFLAGS) $(CPPFLAGS) $(MK_CXXFLAGS) $(CXXFLAGS)
override LDFLAGS := $(MK_LDFLAGS) $(LDFLAGS)

#

@@ -2,8 +2,30 @@
import PackageDescription

#if arch(arm) || arch(arm64)
let platforms: [SupportedPlatform]? = [
.macOS(.v11),
.iOS(.v14),
.watchOS(.v4),
.tvOS(.v14)
]
let exclude: [String] = []
let additionalSources: [String] = ["ggml-metal.m"]
let additionalSettings: [CSetting] = [
.unsafeFlags(["-fno-objc-arc"]),
.define("GGML_SWIFT"),
.define("GGML_USE_METAL")
]
#else
let platforms: [SupportedPlatform]? = nil
let exclude: [String] = ["ggml-metal.metal"]
let additionalSources: [String] = []
let additionalSettings: [CSetting] = []
#endif

let package = Package(
name: "llama",
platforms: platforms,
products: [
.library(name: "llama", targets: ["llama"]),
],

@@ -11,23 +33,23 @@ let package = Package(
.target(
name: "llama",
path: ".",
exclude: ["ggml-metal.metal"],
exclude: exclude,
sources: [
"ggml.c",
"llama.cpp",
"ggml-alloc.c",
"k_quants.c"
],
"k_quants.c",
] + additionalSources,
publicHeadersPath: "spm-headers",
cSettings: [
.unsafeFlags(["-Wno-shorten-64-to-32"]),
.define("GGML_USE_K_QUANTS"),
.define("GGML_USE_ACCELERATE")
],
] + additionalSettings,
linkerSettings: [
.linkedFramework("Accelerate")
]
),
)
],
cxxLanguageStandard: .cxx11
)

README.md (30 changed lines)

@@ -11,21 +11,9 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
### Hot topics

- #### IMPORTANT: Tokenizer fixes and API change (developers and projects using `llama.cpp` built-in tokenization must read): https://github.com/ggerganov/llama.cpp/pull/2810
- Local Falcon 180B inference on Mac Studio

- GGUFv2 adds support for 64-bit sizes + backwards compatible: https://github.com/ggerganov/llama.cpp/pull/2821

- Added support for Falcon models: https://github.com/ggerganov/llama.cpp/pull/2717

- A new file format has been introduced: [GGUF](https://github.com/ggerganov/llama.cpp/pull/2398)

Last revision compatible with the old format: [dadbed9](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa)

### Current `master` should be considered in Beta - expect some issues for a few days!

### Be prepared to re-convert and / or re-quantize your GGUF models while this notice is up!

### Issues with non-GGUF models will be considered with low priority!
https://github.com/ggerganov/llama.cpp/assets/1991296/98abd4e8-7077-464c-ae89-aebabca7757e

----

@@ -413,7 +401,7 @@ Building the program with BLAS support may lead to some performance improvements
- #### hipBLAS

This provide BLAS acceleation on HIP supported GPU like AMD GPU.
This provides BLAS acceleration on HIP-supported AMD GPUs.
Make sure to have ROCm installed.
You can download it from your Linux distro's package manager or from here: [ROCm Quick Start (Linux)](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html).
Windows support is coming soon...

@@ -737,12 +725,12 @@ python3 convert.py pygmalion-7b/ --outtype q4_1
- Refer to [Facebook's LLaMA download page](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) if you want to access the model data.
- Alternatively, if you want to save time and space, you can download already converted and quantized models from [TheBloke](https://huggingface.co/TheBloke), including:
- [LLaMA 2 7B base](https://huggingface.co/TheBloke/Llama-2-7B-GGML)
- [LLaMA 2 13B base](https://huggingface.co/TheBloke/Llama-2-13B-GGML)
- [LLaMA 2 70B base](https://huggingface.co/TheBloke/Llama-2-70B-GGML)
- [LLaMA 2 7B chat](https://huggingface.co/TheBloke/Llama-2-7B-chat-GGML)
- [LLaMA 2 13B chat](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML)
- [LLaMA 2 70B chat](https://huggingface.co/TheBloke/Llama-2-70B-chat-GGML)
- [LLaMA 2 7B base](https://huggingface.co/TheBloke/Llama-2-7B-GGUF)
- [LLaMA 2 13B base](https://huggingface.co/TheBloke/Llama-2-13B-GGUF)
- [LLaMA 2 70B base](https://huggingface.co/TheBloke/Llama-2-70B-GGUF)
- [LLaMA 2 7B chat](https://huggingface.co/TheBloke/Llama-2-7B-chat-GGUF)
- [LLaMA 2 13B chat](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF)
- [LLaMA 2 70B chat](https://huggingface.co/TheBloke/Llama-2-70B-chat-GGUF)

### Verifying the model files

@@ -57,7 +57,7 @@ int32_t get_num_physical_cores() {
siblings.insert(line);
}
}
if (siblings.size() > 0) {
if (!siblings.empty()) {
return static_cast<int32_t>(siblings.size());
}
#elif defined(__APPLE__) && defined(__MACH__)

@@ -20,6 +20,9 @@
#define DIRECTORY_SEPARATOR '/'
#endif // _WIN32

#define die(msg) do { fputs("error: " msg "\n", stderr); exit(1); } while (0)
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__); exit(1); } while (0)

//
// CLI argument parsing
//

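The die and die_fmt macros added in this hunk wrap the print-an-error-and-exit pattern used across the examples. A small usage sketch; the open_or_die helper and the file name are made up for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    // copies of the macros added in this hunk
    #define die(msg)          do { fputs("error: " msg "\n", stderr); exit(1); } while (0)
    #define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__); exit(1); } while (0)

    // hypothetical caller, only to show intended use
    static FILE * open_or_die(const char * path) {
        FILE * fp = fopen(path, "rb");
        if (fp == NULL) {
            die_fmt("failed to open %s", path);      // formatted message, then exit(1)
        }
        return fp;
    }

    int main(void) {
        FILE * fp = open_or_die("model.gguf");
        if (fgetc(fp) == EOF) {
            die("unexpectedly reached end of file"); // fixed message, then exit(1)
        }
        fclose(fp);
        return 0;
    }
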
@@ -415,6 +415,7 @@ namespace grammar_parser {
std::vector<const llama_grammar_element *> parse_state::c_rules() {
std::vector<const llama_grammar_element *> ret;
ret.reserve(rules.size());
for (const auto & rule : rules) {
ret.push_back(rule.data());
}

convert.py (30 changed lines)

@@ -145,7 +145,6 @@ GGML_FILE_TYPE_TO_DATA_TYPE: dict[GGMLFileType, DataType] = {
class Params:
n_vocab: int
n_embd: int
n_mult: int
n_layer: int
n_ctx: int
n_ff: int

@@ -161,15 +160,6 @@ class Params:
# path to the directory containing the model files
path_model: Path | None = None

@staticmethod
def find_n_mult(n_ff: int, n_embd: int) -> int:
# hardcoded magic range
for n_mult in range(8192, 1, -1):
calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
if calc_ff == n_ff:
return n_mult
raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")

@staticmethod
def guessed(model: LazyModel) -> Params:
# try transformer naming first

@@ -197,7 +187,6 @@ class Params:
return Params(
n_vocab = n_vocab,
n_embd = n_embd,
n_mult = n_mult,
n_layer = n_layer,
n_ctx = -1,
n_ff = n_ff,

@@ -225,8 +214,6 @@ class Params:
else:
f_rope_scale = None

n_mult = Params.find_n_mult(n_ff, n_embd)

if "max_sequence_length" in config:
n_ctx = config["max_sequence_length"]
elif "max_position_embeddings" in config:

@@ -238,7 +225,6 @@ class Params:
return Params(
n_vocab = n_vocab,
n_embd = n_embd,
n_mult = n_mult,
n_layer = n_layer,
n_ctx = n_ctx,
n_ff = n_ff,

@@ -250,7 +236,7 @@ class Params:
)

# LLaMA v2 70B params.json
# {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1
# {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1}
@staticmethod
def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params:
config = json.load(open(config_path))

@@ -258,7 +244,6 @@ class Params:
n_vocab = config["vocab_size"] if "vocab_size" in config else -1
n_embd = config["dim"]
n_layer = config["n_layers"]
n_mult = config["multiple_of"]
n_ff = -1
n_head = config["n_heads"]
n_head_kv = config["n_kv_heads"] if "n_kv_heads" in config else n_head

@@ -266,7 +251,7 @@ class Params:
f_rope_freq_base = config["rope_theta"] if "rope_theta" in config else None

# hack to determine LLaMA v1 vs v2 vs CodeLlama
if f_rope_freq_base and f_rope_freq_base == 1000000:
if f_rope_freq_base == 1000000:
# CodeLlama
n_ctx = 16384
elif config["norm_eps"] == 1e-05:

@@ -285,7 +270,6 @@ class Params:
return Params(
n_vocab = n_vocab,
n_embd = n_embd,
n_mult = n_mult,
n_layer = n_layer,
n_ctx = n_ctx,
n_ff = n_ff,

@@ -841,9 +825,9 @@ class OutputFile:
name = "LLaMA"

# TODO: better logic to determine model name
if (params.n_ctx == 4096):
if params.n_ctx == 4096:
name = "LLaMA v2"
elif params.path_model:
elif params.path_model is not None:
name = str(params.path_model.parent).split('/')[-1]

self.gguf.add_name (name)

@@ -856,13 +840,13 @@ class OutputFile:
self.gguf.add_head_count_kv (params.n_head_kv)
self.gguf.add_layer_norm_rms_eps (params.f_norm_eps)

if params.f_rope_freq_base:
if params.f_rope_freq_base is not None:
self.gguf.add_rope_freq_base(params.f_rope_freq_base)

if params.f_rope_scale:
if params.f_rope_scale is not None:
self.gguf.add_rope_scale_linear(params.f_rope_scale)

if params.ftype:
if params.ftype is not None:
self.gguf.add_file_type(params.ftype)

def add_meta_vocab(self, vocab: Vocab) -> None:

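For context on the removed find_n_mult heuristic, a brief stand-alone check of the same rounding rule, written in C; the LLaMA-7B sizes (n_embd = 4096, n_ff = 11008) are assumptions used only for illustration:

    #include <stdio.h>

    // Same rounding rule as the removed Params.find_n_mult:
    // calc_ff = ceil((8*n_embd/3) / n_mult) * n_mult, searching n_mult downward.
    int main(void) {
        const int n_embd = 4096;   // assumed LLaMA-7B embedding size
        const int n_ff   = 11008;  // assumed LLaMA-7B feed-forward size
        for (int n_mult = 8192; n_mult > 1; --n_mult) {
            const int calc_ff = ((8*n_embd/3 + n_mult - 1) / n_mult) * n_mult;
            if (calc_ff == n_ff) {
                printf("n_mult = %d\n", n_mult);  // first (largest) matching multiple
                return 0;
            }
        }
        printf("no n_mult found\n");
        return 1;
    }
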
@@ -1,7 +1,3 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "common.h"
#include "llama.h"
#include "build-info.h"

@@ -1,5 +1,6 @@
#include "ggml.h"
#include "llama.h"
#include "common.h"

#include <unordered_map>
#include <vector>

@@ -499,10 +500,10 @@ struct llama_file {
errno = 0;
std::size_t ret = std::fread(ptr, size, 1, fp);
if (ferror(fp)) {
throw std::runtime_error(format("read error: %s", strerror(errno)));
die_fmt("fread failed: %s", strerror(errno));
}
if (ret != 1) {
throw std::runtime_error(std::string("unexpectedly reached end of file"));
die("unexpectedly reached end of file");
}
}

@@ -597,8 +598,7 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
printf("Assuming llama2.c vocabulary since %s is not a gguf file\n", filename);
llama_file file(filename, "rb");
if (!file.fp) {
fprintf(stderr, "error: %s: %s\n", strerror(errno), filename);
exit(1);
die_fmt("%s: %s", strerror(errno), filename);
}
const int n_vocab = config->vocab_size;
/* uint32_t max_token_length = */ file.read_u32(); // unused

@@ -1,8 +1,3 @@
// Defines sigaction on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "embd-input.h"

#include <cassert>

@@ -23,7 +18,7 @@ extern "C" {
struct MyModel* create_mymodel(int argc, char ** argv) {
gpt_params params;

if (gpt_params_parse(argc, argv, params) == false) {
if (!gpt_params_parse(argc, argv, params)) {
return nullptr;
}

@@ -11,17 +11,12 @@
int main(int argc, char ** argv) {
gpt_params params;

if (gpt_params_parse(argc, argv, params) == false) {
if (!gpt_params_parse(argc, argv, params)) {
return 1;
}

params.embedding = true;

if (params.n_ctx > 2048) {
fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
"expect poor results\n", __func__, params.n_ctx);
}

fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

if (params.seed == LLAMA_DEFAULT_SEED) {

@@ -47,6 +42,12 @@ int main(int argc, char ** argv) {
return 1;
}

const int n_ctx_train = llama_n_ctx_train(ctx);
if (params.n_ctx > n_ctx_train) {
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
__func__, n_ctx_train, params.n_ctx);
}

// print system information
{
fprintf(stderr, "\n");

|
|||
|
||||
gpt_params params;
|
||||
|
||||
if (gpt_params_parse(argc, argv, params) == false) {
|
||||
if (!gpt_params_parse(argc, argv, params)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
|
|
@@ -925,7 +925,7 @@ int main(int argc, char ** argv) {
gpt_params params;

if (gpt_params_parse(argc, argv, params) == false) {
if (!gpt_params_parse(argc, argv, params)) {
return 1;
}

@@ -1,8 +1,3 @@
// Defines sigaction on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "common.h"

#include "console.h"

@@ -48,8 +43,9 @@ static bool is_interacting = false;
void write_logfile(
const llama_context * ctx, const gpt_params & params, const llama_model * model,
const std::vector<llama_token> input_tokens, const std::string output, const std::vector<llama_token> output_tokens) {
const std::vector<llama_token> & input_tokens, const std::string & output,
const std::vector<llama_token> & output_tokens
) {
if (params.logdir.empty()) {
return;
}

@@ -109,7 +105,7 @@ int main(int argc, char ** argv) {
gpt_params params;
g_params = &params;

if (gpt_params_parse(argc, argv, params) == false) {
if (!gpt_params_parse(argc, argv, params)) {
return 1;
}

@@ -186,8 +182,10 @@ int main(int argc, char ** argv) {
return 1;
}

if (params.n_ctx > llama_n_ctx(ctx)) {
LOG_TEE("%s: warning: base model only supports context sizes no greater than %d tokens (%d specified)\n", __func__, llama_n_ctx(ctx), params.n_ctx);
const int n_ctx_train = llama_n_ctx_train(ctx);
if (params.n_ctx > n_ctx_train) {
LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
__func__, n_ctx_train, params.n_ctx);
} else if (params.n_ctx < 8) {
LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
params.n_ctx = 8;

@@ -303,7 +301,7 @@ int main(int argc, char ** argv) {
// debug message about similarity of saved session, if applicable
size_t n_matching_session_tokens = 0;
if (session_tokens.size() > 0) {
if (!session_tokens.empty()) {
for (llama_token id : session_tokens) {
if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
break;

@@ -401,7 +399,7 @@ int main(int argc, char ** argv) {
LOG_TEE("%s: interactive mode on.\n", __func__);

if (params.antiprompt.size()) {
if (!params.antiprompt.empty()) {
for (const auto & antiprompt : params.antiprompt) {
LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
}

@@ -499,7 +497,7 @@ int main(int argc, char ** argv) {
while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
// predict
if (embd.size() > 0) {
if (!embd.empty()) {
// Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
// --prompt or --file which uses the same value.
int max_embd_size = n_ctx - 4;

@@ -624,7 +622,7 @@ int main(int argc, char ** argv) {
LOG("n_past = %d\n", n_past);
}

if (embd.size() > 0 && !path_session.empty()) {
if (!embd.empty() && !path_session.empty()) {
session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
n_session_consumed = session_tokens.size();
}

@@ -695,7 +693,7 @@ int main(int argc, char ** argv) {
// if not currently processing queued inputs;
if ((int) embd_inp.size() <= n_consumed) {
// check for reverse prompt
if (params.antiprompt.size()) {
if (!params.antiprompt.empty()) {
std::string last_output;
for (auto id : last_tokens) {
last_output += llama_token_to_piece(ctx, id);

@@ -732,7 +730,7 @@ int main(int argc, char ** argv) {
LOG("found EOS token\n");

if (params.interactive) {
if (params.antiprompt.size() != 0) {
if (!params.antiprompt.empty()) {
// tokenize and inject first reverse prompt
const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());

@@ -655,7 +655,7 @@ int main(int argc, char ** argv) {
gpt_params params;

params.n_batch = 512;
if (gpt_params_parse(argc, argv, params) == false) {
if (!gpt_params_parse(argc, argv, params)) {
return 1;
}

@@ -693,9 +693,10 @@ int main(int argc, char ** argv) {
return 1;
}

if (params.n_ctx > llama_n_ctx(ctx)) {
fprintf(stderr, "%s: warning: model might not support context sizes greater than %d tokens (%d specified);"
"expect poor results\n", __func__, llama_n_ctx(ctx), params.n_ctx);
const int n_ctx_train = llama_n_ctx_train(ctx);
if (params.n_ctx > n_ctx_train) {
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
__func__, n_ctx_train, params.n_ctx);
}

// print system information

@@ -71,7 +71,7 @@ void quantize_stats_print_usage(int /*argc*/, char ** argv) {
}

// Check if a layer is included/excluded by command line
bool layer_included(const quantize_stats_params params, const std::string & layer) {
bool layer_included(const quantize_stats_params & params, const std::string & layer) {
for (const auto& excluded : params.exclude_layers) {
if (std::regex_search(layer, std::regex(excluded))) {
return false;

@@ -143,10 +143,9 @@ int main(int argc, char ** argv) {
if (!try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[3]);
return 1;
} else {
if (ftype_str == "COPY") {
params.only_copy = true;
}
}
if (ftype_str == "COPY") {
params.only_copy = true;
}
arg_idx++;
}

@@ -13,7 +13,7 @@ int main(int argc, char ** argv) {
params.repeat_last_n = 64;
params.prompt = "The quick brown fox";

if (gpt_params_parse(argc, argv, params) == false) {
if (!gpt_params_parse(argc, argv, params)) {
return 1;
}

@@ -44,7 +44,7 @@ int main(int argc, char ** argv) {
llama_free_model(model);
return 1;
}
auto tokens = llama_tokenize(ctx, params.prompt.c_str(), true);
auto tokens = llama_tokenize(ctx, params.prompt, true);
auto n_prompt_tokens = tokens.size();
if (n_prompt_tokens < 1) {
fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);

@@ -139,7 +139,7 @@ static std::string tokens_to_output_formatted_string(const llama_context *ctx, c
}

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> probs)
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> & probs)
{
json out = json::array();
for (const auto &prob : probs)

@@ -271,7 +271,7 @@ struct llama_server_context
return true;
}

std::vector<llama_token> tokenize(json json_prompt, bool add_bos)
std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
{
// If `add_bos` is true, we only add BOS, when json_prompt is a string,
// or the first element of the json_prompt array is a string.

@@ -611,7 +611,7 @@ struct llama_server_context
completion_token_output doCompletion()
{
const completion_token_output token_with_probs = nextToken();
auto token_with_probs = nextToken();

const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_piece(ctx, token_with_probs.tok);
generated_text += token_text;

@@ -1255,7 +1255,7 @@ void beam_search_callback(void * callback_data, llama_beams_state beams_state) {
struct token_translator {
llama_context * ctx;
std::string operator()(llama_token tok) const { return llama_token_to_piece(ctx, tok); }
std::string operator()(completion_token_output cto) const { return (*this)(cto.tok); }
std::string operator()(const completion_token_output & cto) const { return (*this)(cto.tok); }
};

void append_to_generated_text_from_generated_token_probs(llama_server_context & llama) {

@@ -1,7 +1,3 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "build-info.h"

#include "common.h"

@@ -1,7 +1,3 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "build-info.h"

#include "common.h"

@@ -169,10 +169,6 @@ struct my_llama_hparams {
float rope_freq_base = 10000.0f;
float rope_freq_scale = 1.0f;

bool operator!=(const my_llama_hparams& other) const {
return memcmp(this, &other, sizeof(my_llama_hparams));
}
};

struct my_llama_layer {

@@ -929,28 +925,6 @@ void get_example_targets_batch(struct llama_context * lctx, const int * train_sa
}
}

#ifdef __GNUC__
#ifdef __MINGW32__
__attribute__((format(gnu_printf, 1, 2)))
#else
__attribute__((format(printf, 1, 2)))
#endif
#endif
static std::string format(const char * fmt, ...) {
va_list ap, ap2;
va_start(ap, fmt);
va_copy(ap2, ap);
int size = vsnprintf(NULL, 0, fmt, ap);
GGML_ASSERT(size >= 0 && size < INT_MAX);
std::vector<char> buf(size + 1);
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
GGML_ASSERT(size2 == size);
va_end(ap2);
va_end(ap);
return std::string(buf.data(), size);
}

int tokenize_file(struct llama_context * lctx, const char * filename, std::vector<llama_token>& out) {
FILE * fp = std::fopen(filename, "rb");
if (fp == NULL) {

@@ -983,10 +957,10 @@ int tokenize_file(struct llama_context * lctx, const char * filename, std::vecto
out.resize(size+1);

if (std::fread(buf.data(), size, 1, fp) != 1) {
throw std::runtime_error(std::string("unexpectedly reached end of file"));
die("unexpectedly reached end of file");
}
if (ferror(fp)) {
throw std::runtime_error(format("read error: %s", strerror(errno)));
die_fmt("fread failed: %s", strerror(errno));
}

buf[size] = '\0';

@@ -1047,11 +1021,11 @@ void shuffle_ints(int * begin, int * end) {
if (kid >= 0) { \
enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
if (ktype != (type)) { \
throw std::runtime_error(format("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype))); \
die_fmt("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype)); \
} \
(dst) = func(ctx, kid); \
} else if (req) { \
throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
die_fmt("key not found in model: %s", skey.c_str()); \
} \
}

@@ -1136,7 +1110,7 @@ void load_opt_context_gguf(struct gguf_context * fctx, struct ggml_context * f_g
read_tensor_by_name(opt->lbfgs.lms, f_ggml_ctx, LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S);
read_tensor_by_name(opt->lbfgs.lmy, f_ggml_ctx, LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y);
} else {
throw std::runtime_error("unknown optimizer type\n");
die("unknown optimizer type");
}
}

@@ -1315,20 +1289,20 @@ void save_llama_model_gguf(struct gguf_context * fctx, const char * fn_vocab_mod
const int token_idx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_LIST));
if (token_idx == -1) {
throw std::runtime_error("cannot find tokenizer vocab in model file\n");
die("cannot find tokenizer vocab in model file");
}
const uint32_t n_vocab = gguf_get_arr_n(vctx, token_idx);

const int score_idx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_SCORES));
if (score_idx == -1) {
throw std::runtime_error("cannot find tokenizer scores in model file\n");
die("cannot find tokenizer scores in model file");
}

const float * scores = (const float * ) gguf_get_arr_data(vctx, score_idx);

const int toktype_idx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE));
if (toktype_idx == -1) {
throw std::runtime_error("cannot find token type list in GGUF file\n");
die("cannot find token type list in GGUF file");
}

const int * toktypes = (const int * ) gguf_get_arr_data(vctx, toktype_idx);

@@ -1356,7 +1330,7 @@ void save_llama_model_gguf(struct gguf_context * fctx, const char * fn_vocab_mod
// read and copy bpe merges
const int merges_keyidx = gguf_find_key(vctx, kv(LLM_KV_TOKENIZER_MERGES));
if (merges_keyidx == -1) {
throw std::runtime_error("cannot find tokenizer merges in model file\n");
die("cannot find tokenizer merges in model file");
}

const int n_merges = gguf_get_arr_n(vctx, merges_keyidx);

@@ -1988,7 +1962,7 @@ void opt_callback(void * vdata, float * sched) {
float min_sched = params->adam_min_alpha / params->adam_alpha;
*sched = min_sched + *sched * (1.0f - min_sched);

int impr_plot = std::isnan(opt->loss_after) ? 0 : -(int)(1 + (opt->loss_before - opt->loss_after) * 10.0f + 0.5f);
int impr_plot = std::isnan(opt->loss_after) ? 0 : -std::lround(1 + (opt->loss_before - opt->loss_after) * 10.0f);
printf("%s: iter=%*d, sched=%f loss0=%f loss=%f | improvement: %*d>\n", __func__, 6, opt->iter, *sched, opt->loss_before, opt->loss_after, impr_plot, (int)0);

if (data->shuffle_countdown < n_batch) {

@@ -93,6 +93,10 @@
type = "app";
program = "${self.packages.${system}.default}/bin/quantize";
};
apps.train-text-from-scratch = {
type = "app";
program = "${self.packages.${system}.default}/bin/train-text-from-scratch";
};
apps.default = self.apps.${system}.llama;
devShells.default = pkgs.mkShell {
buildInputs = [ llama-python ];

ggml-alloc.c (17 changed lines)

@@ -1,8 +1,3 @@
// defines MAP_ANONYMOUS
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "ggml-alloc.h"
#include "ggml.h"
#include <assert.h>

@@ -138,7 +133,7 @@ static bool ggml_allocr_is_own(struct ggml_allocr * alloc, const struct ggml_ten
void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
#ifdef GGML_ALLOCATOR_DEBUG
GGML_ASSERT(ggml_is_view(tensor) == false); // views generally get data pointer from one of their sources
GGML_ASSERT(!ggml_is_view(tensor)); // views generally get data pointer from one of their sources
GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
#endif
size_t size = ggml_allocr_get_alloc_size(alloc, tensor);

@@ -165,14 +160,14 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor)
if (best_fit_block == -1) {
// the last block is our last resort
struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
max_avail = MAX(max_avail, block->size);
if (block->size >= size) {
best_fit_block = alloc->n_free_blocks - 1;
max_avail = MAX(max_avail, block->size);
} else {
fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
__func__, size, max_avail);
GGML_ASSERT(!"not enough space in the buffer");
return;
return;
}
}
struct free_block * block = &alloc->free_blocks[best_fit_block];

@@ -316,7 +311,11 @@ static void * alloc_vmem(size_t size) {
#if defined(_WIN32)
return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
#elif defined(_POSIX_MAPPED_FILES)
return mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
void * ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
if (ptr == MAP_FAILED) {
return NULL;
}
return ptr;
#else
// use a fixed address for other platforms
uintptr_t base_addr = (uintptr_t)-size - 0x100;

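The alloc_vmem change above matters because mmap signals failure with MAP_FAILED, which is not NULL, so returning its result directly could hand a bogus pointer to callers that only check for NULL. A hedged stand-alone sketch of the corrected pattern:

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    // Reserve (but do not commit) a span of address space, similar to what
    // alloc_vmem does on POSIX systems. Converting MAP_FAILED ((void *)-1)
    // to NULL keeps the caller's NULL checks meaningful.
    void * reserve_vmem(size_t size) {
        void * ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
        if (ptr == MAP_FAILED) {
            return NULL;
        }
        return ptr;
    }

    int main(void) {
        void * p = reserve_vmem(1u << 20);
        if (p == NULL) {
            fprintf(stderr, "reservation failed\n");
            return 1;
        }
        munmap(p, 1u << 20);
        return 0;
    }
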
ggml-cuda.cu (32 changed lines)

@@ -4086,7 +4086,8 @@ static __global__ void rope_neox_f32(const float * x, float * dst, const int nco
dst[i + ncols/2] = x0*sin_theta + x1*cos_theta;
}

static __global__ void rope_glm_f32(const float * x, float * dst, const int ncols, const float p, const float block_p, const float theta_scale) {
static __global__ void rope_glm_f32(const float * x, float * dst, const int ncols, const float p0,
const float p_delta, const int p_delta_rows, const float theta_scale, const int n_ctx) {
const int col = blockDim.x*blockIdx.x + threadIdx.x;
const int half_n_dims = ncols/4;

@@ -4098,8 +4099,9 @@ static __global__ void rope_glm_f32(const float * x, float * dst, const int ncol
const int i = row*ncols + col;

const float col_theta_scale = powf(theta_scale, col);
const float p = p0 + p_delta*(row/p_delta_rows);

const float theta = p*col_theta_scale;
const float theta = min(p, p_delta*(n_ctx - 2))*col_theta_scale;
const float sin_theta = sinf(theta);
const float cos_theta = cosf(theta);

@@ -4109,7 +4111,7 @@ static __global__ void rope_glm_f32(const float * x, float * dst, const int ncol
dst[i + 0] = x0*cos_theta - x1*sin_theta;
dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta;

const float block_theta = block_p*col_theta_scale;
const float block_theta = max(p - p_delta*(n_ctx - 2), 0.f)*col_theta_scale;
const float sin_block_theta = sinf(block_theta);
const float cos_block_theta = cosf(block_theta);

@@ -4984,12 +4986,13 @@ static void rope_neox_f32_cuda(const float * x, float * dst, const int ncols, co
rope_neox_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
}

static void rope_glm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p, const float block_p, const float theta_scale, cudaStream_t stream) {
GGML_ASSERT(nrows % 4 == 0);
const dim3 block_dims(4*CUDA_ROPE_BLOCK_SIZE, 1, 1);
const int num_blocks_x = (ncols + 4*CUDA_ROPE_BLOCK_SIZE - 1) / (4*CUDA_ROPE_BLOCK_SIZE);
static void rope_glm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
const float p_delta, const int p_delta_rows, const float theta_scale, const int n_ctx, cudaStream_t stream) {
GGML_ASSERT(ncols % 4 == 0);
const dim3 block_dims(CUDA_ROPE_BLOCK_SIZE/4, 1, 1);
const int num_blocks_x = (ncols + CUDA_ROPE_BLOCK_SIZE - 1) / CUDA_ROPE_BLOCK_SIZE;
const dim3 block_nums(num_blocks_x, nrows, 1);
rope_glm_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p, block_p, theta_scale);
rope_glm_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale, n_ctx);
}

static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,

@@ -5723,22 +5726,18 @@ inline void ggml_cuda_op_rope(
memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

const float theta_scale = powf(freq_base, -2.0f/n_dims);
const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;

const bool is_neox = mode & 2;
const bool is_glm = mode & 4;

// compute
if (is_glm) {
const float p = (((mode & 1) == 0 ? n_past + i02 : i02)) * freq_scale;
const float id_p = min(p, n_ctx - 2.f);
const float block_p = max(p - (n_ctx - 2.f), 0.f);
rope_glm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, id_p, block_p, theta_scale, cudaStream_main);
rope_glm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, n_ctx, cudaStream_main);
} else if (is_neox) {
GGML_ASSERT(ne00 == n_dims && "ne00 != n_dims is not implemented for CUDA yet");
const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
rope_neox_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
} else {
const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
}

@@ -6400,10 +6399,7 @@ void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_ten
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
GGML_ASSERT(ggml_is_contiguous(src0)); // TODO: this restriction is temporary until non-cont support is implemented

const int mode = ((int32_t *) dst->op_params)[2];
const bool is_glm = mode & 4;

ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true, !is_glm); // flatten support not implemented for glm
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true, true);
}

void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {

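To make the reworked kernel arguments easier to follow, here is a small host-side C sketch of the per-row and per-column angles computed by the updated rope_glm_f32; the names follow the diff, and the helper itself is illustrative rather than code from the repository:

    #include <math.h>

    // Per-element angles for the GLM variant of RoPE, using the same
    // expressions as the updated kernel: the "block" rotation only starts
    // contributing once the position exceeds p_delta*(n_ctx - 2).
    void glm_rope_angles(float p0, float p_delta, int p_delta_rows, int n_ctx,
                         float theta_scale, int row, int col,
                         float * theta, float * block_theta) {
        const float col_theta_scale = powf(theta_scale, (float) col);
        const float p               = p0 + p_delta * (row / p_delta_rows); // integer division, as in the kernel

        *theta       = fminf(p, p_delta * (n_ctx - 2)) * col_theta_scale;
        *block_theta = fmaxf(p - p_delta * (n_ctx - 2), 0.0f) * col_theta_scale;
    }
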
ggml-metal.m (29 changed lines)

@@ -120,14 +120,17 @@ static NSString * const msl_library_source = @"see metal.metal";
struct ggml_metal_context * ggml_metal_init(int n_cb) {
metal_printf("%s: allocating\n", __func__);

// Show all the Metal device instances in the system
NSArray * devices = MTLCopyAllDevices();
id <MTLDevice> device;
NSString * s;

#if TARGET_OS_OSX
// Show all the Metal device instances in the system
NSArray * devices = MTLCopyAllDevices();
for (device in devices) {
s = [device name];
metal_printf("%s: found device: %s\n", __func__, [s UTF8String]);
}
#endif

// Pick and show default Metal device
device = MTLCreateSystemDefaultDevice();

@@ -144,12 +147,20 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
ctx->d_queue = dispatch_queue_create("llama.cpp", DISPATCH_QUEUE_CONCURRENT);

#if 0
// compile from source string and show compile log
#ifdef GGML_SWIFT
// load the default.metallib file
{
NSError * error = nil;

ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
NSString * llamaBundlePath = [bundle pathForResource:@"llama_llama" ofType:@"bundle"];
NSBundle * llamaBundle = [NSBundle bundleWithPath:llamaBundlePath];
NSString * libPath = [llamaBundle pathForResource:@"default" ofType:@"metallib"];
NSURL * libURL = [NSURL fileURLWithPath:libPath];

// Load the metallib file into a Metal library
ctx->library = [ctx->device newLibraryWithURL:libURL error:&error];

if (error) {
metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
return NULL;

@@ -253,13 +264,15 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
#undef GGML_METAL_ADD_KERNEL
}

metal_printf("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
metal_printf("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
#if TARGET_OS_OSX
metal_printf("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
if (ctx->device.maxTransferRate != 0) {
metal_printf("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
} else {
metal_printf("%s: maxTransferRate = built-in GPU\n", __func__);
}
#endif

return ctx;
}

@@ -462,6 +475,7 @@ bool ggml_metal_add_buffer(
}
}

#if TARGET_OS_OSX
metal_printf(", (%8.2f / %8.2f)",
ctx->device.currentAllocatedSize / 1024.0 / 1024.0,
ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

@@ -471,6 +485,9 @@ bool ggml_metal_add_buffer(
} else {
metal_printf("\n");
}
#else
metal_printf(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0);
#endif
}

return true;

ggml-metal.metal (135 changed lines)

@@ -1200,31 +1200,40 @@ kernel void kernel_mul_mat_q3_K_f32(
device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb + offset0;
device const float * yy = (device const float *) src1 + r1*ne10 + r2*ne00*ne1;

float yl[16];
float yl[32];

const uint16_t kmask1 = 0x0303;
const uint16_t kmask1 = 0x3030;
const uint16_t kmask2 = 0x0f0f;

const int tid = tiisg/2;
const int ix = tiisg%2;
const int ip = tid/8; // 0 or 1
const int il = tid/2 - 4*ip; // 0...3
const int tid = tiisg/4;
const int ix = tiisg%4;
const int ip = tid/4; // 0 or 1
const int il = 2*((tid%4)/2); // 0 or 2
const int ir = tid%2;
const int n = 8;
const int l0 = n*ir;

const uint16_t m1 = 1 << (4*ip + il);
const uint16_t m2 = m1 << 8;
// One would think that the Metal compiler would figure out that ip and il can only have
// 4 possible states, and optimize accordingly. Well, no. It needs help, and we do it
// with these two tables.
//
// Possible masks for the high bit
const ushort4 mm[4] = {{0x0001, 0x0100, 0x0002, 0x0200}, // ip = 0, il = 0
{0x0004, 0x0400, 0x0008, 0x0800}, // ip = 0, il = 2
{0x0010, 0x1000, 0x0020, 0x2000}, // ip = 1, il = 0
{0x0040, 0x4000, 0x0080, 0x8000}}; // ip = 1, il = 2

// Possible masks for the low 2 bits
const int4 qm[2] = {{0x0003, 0x0300, 0x000c, 0x0c00}, {0x0030, 0x3000, 0x00c0, 0xc000}};

const ushort4 hm = mm[2*ip + il/2];

const int shift = 2*il;
const uint16_t qm1 = 0x0003 << shift;
const uint16_t qm2 = 0x0300 << shift;
const int32_t v1 = 4 << shift;
const int32_t v2 = 1024 << shift;
const float v1 = il == 0 ? 4.f : 64.f;
const float v2 = 4.f * v1;

const uint16_t s_shift1 = 4*ip;
const uint16_t s_shift2 = s_shift1 + 2*(il/2);
const int ik = 4 + (il%2);
const uint16_t s_shift2 = s_shift1 + il;

const int q_offset = 32*ip + l0;
const int y_offset = 128*ip + 32*il + l0;

@@ -1233,12 +1242,19 @@ kernel void kernel_mul_mat_q3_K_f32(
device const float * y1 = yy + ix*QK_K + y_offset;

float sumf1[2] = {0.f}, sumf2[2] = {0.f};
for (int i = ix; i < nb; i += 2) {
uint32_t scales32, aux32;
thread uint16_t * scales16 = (thread uint16_t *)&scales32;
thread const int8_t * scales = (thread const int8_t *)&scales32;

float sumf1[2] = {0.f};
float sumf2[2] = {0.f};
for (int i = ix; i < nb; i += 4) {

for (int l = 0; l < 8; ++l) {
yl[l+0] = y1[l+ 0];
yl[l+8] = y1[l+16];
yl[l+ 0] = y1[l+ 0];
yl[l+ 8] = y1[l+16];
yl[l+16] = y1[l+32];
yl[l+24] = y1[l+48];
}

device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset);

@@ -1249,27 +1265,43 @@ kernel void kernel_mul_mat_q3_K_f32(
for (int row = 0; row < 2; ++row) {

const float d_all = (float)dh[0];
const char2 scales = as_type<char2>((uint16_t)(((a[il] >> s_shift1) & kmask2) | (((a[ik] >> s_shift2) & kmask1) << 4)));

float s1 = 0, s2 = 0;
for (int l = 0; l < n; l += 2) {
const uint16_t qs = q[l/2];
s1 += yl[l+0] * ((int32_t)(qs & qm1) - ((h[l/2] & m1) ? 0 : v1));
s2 += yl[l+1] * ((int32_t)(qs & qm2) - ((h[l/2] & m2) ? 0 : v2));
}
float d = d_all * (s1 + 1.f/256.f * s2);
sumf1[row] += d * scales[0];
sumf2[row] += d;
scales16[0] = a[4];
scales16[1] = a[5];
aux32 = ((scales32 >> s_shift2) << 4) & 0x30303030;
scales16[0] = a[il+0];
scales16[1] = a[il+1];
scales32 = ((scales32 >> s_shift1) & 0x0f0f0f0f) | aux32;

s1 = s2 = 0;
float s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0, s6 = 0;
for (int l = 0; l < n; l += 2) {
const uint16_t qs = q[l/2+8];
s1 += yl[l+8] * ((int32_t)(qs & qm1) - ((h[l/2+8] & m1) ? 0 : v1));
s2 += yl[l+9] * ((int32_t)(qs & qm2) - ((h[l/2+8] & m2) ? 0 : v2));
const int32_t qs = q[l/2];
s1 += yl[l+0] * (qs & qm[il/2][0]);
s2 += yl[l+1] * (qs & qm[il/2][1]);
s3 += ((h[l/2] & hm[0]) ? 0.f : yl[l+0]) + ((h[l/2] & hm[1]) ? 0.f : yl[l+1]);
s4 += yl[l+16] * (qs & qm[il/2][2]);
s5 += yl[l+17] * (qs & qm[il/2][3]);
s6 += ((h[l/2] & hm[2]) ? 0.f : yl[l+16]) + ((h[l/2] & hm[3]) ? 0.f : yl[l+17]);
}
d = d_all * (s1 + 1.f/256.f * s2);
sumf1[row] += d * scales[1];
sumf2[row] += d;
float d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1);
float d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2);
sumf1[row] += d1 * (scales[0] - 32);
sumf2[row] += d2 * (scales[2] - 32);

s1 = s2 = s3 = s4 = s5 = s6 = 0;
for (int l = 0; l < n; l += 2) {
const int32_t qs = q[l/2+8];
s1 += yl[l+8] * (qs & qm[il/2][0]);
s2 += yl[l+9] * (qs & qm[il/2][1]);
s3 += ((h[l/2+8] & hm[0]) ? 0.f : yl[l+8]) + ((h[l/2+8] & hm[1]) ? 0.f : yl[l+9]);
s4 += yl[l+24] * (qs & qm[il/2][2]);
s5 += yl[l+25] * (qs & qm[il/2][3]);
s6 += ((h[l/2+8] & hm[2]) ? 0.f : yl[l+24]) + ((h[l/2+8] & hm[3]) ? 0.f : yl[l+25]);
}
d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1);
d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2);
sumf1[row] += d1 * (scales[1] - 32);
sumf2[row] += d2 * (scales[3] - 32);

q += step;
h += step;

@@ -1278,17 +1310,20 @@ kernel void kernel_mul_mat_q3_K_f32(
}

y1 += 2 * QK_K;
y1 += 4 * QK_K;

}

for (int row = 0; row < 2; ++row) {
const float sumf = (sumf1[row] - 32.f*sumf2[row]) / (1 << shift);
const float tot = simd_sum(sumf);
if (tiisg == 0) {
dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = tot;
const float sumf = (sumf1[row] + 0.25f * sumf2[row]) / (1 << shift);
sumf1[row] = simd_sum(sumf);
}
if (tiisg == 0) {
for (int row = 0; row < 2; ++row) {
dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = sumf1[row];
}
}

}
#else
kernel void kernel_mul_mat_q3_K_f32(

@@ -1641,17 +1676,25 @@ kernel void kernel_mul_mat_q5_K_f32(
sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2);
sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2);

float4 acc = {0.f, 0.f, 0.f, 0.f};
float4 acc1 = {0.f};
float4 acc2 = {0.f};
for (int l = 0; l < n; ++l) {
uint8_t h = qh[l];
acc[0] += yl[l+0] * ((uint16_t)(q1[l] & 0x0F) + (h & hm1 ? 16 : 0));
acc[1] += yl[l+8] * ((uint16_t)(q1[l] & 0xF0) + (h & hm2 ? 256 : 0));
acc[2] += yh[l+0] * ((uint16_t)(q2[l] & 0x0F) + (h & hm3 ? 16 : 0));
acc[3] += yh[l+8] * ((uint16_t)(q2[l] & 0xF0) + (h & hm4 ? 256 : 0));
acc1[0] += yl[l+0] * (q1[l] & 0x0F);
acc1[1] += yl[l+8] * (q1[l] & 0xF0);
acc1[2] += yh[l+0] * (q2[l] & 0x0F);
acc1[3] += yh[l+8] * (q2[l] & 0xF0);
acc2[0] += h & hm1 ? yl[l+0] : 0.f;
acc2[1] += h & hm2 ? yl[l+8] : 0.f;
acc2[2] += h & hm3 ? yh[l+0] : 0.f;
acc2[3] += h & hm4 ? yh[l+8] : 0.f;
}
const float dall = dh[0];
const float dmin = dh[1];
sumf[row] += dall * (acc[0] * sc8[0] + acc[1] * sc8[1] * 1.f/16.f + acc[2] * sc8[4] + acc[3] * sc8[5] * 1.f/16.f) -
sumf[row] += dall * (sc8[0] * (acc1[0] + 16.f*acc2[0]) +
sc8[1] * (acc1[1]/16.f + 16.f*acc2[1]) +
sc8[4] * (acc1[2] + 16.f*acc2[2]) +
sc8[5] * (acc1[3]/16.f + 16.f*acc2[3])) -
dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]);

q1 += step;

38
ggml.c
38
ggml.c
|
@ -1,4 +1,3 @@
|
|||
#define _GNU_SOURCE // Defines CLOCK_MONOTONIC on Linux
|
||||
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
|
||||
|
||||
#include "ggml.h"
|
||||
|
@ -47,6 +46,10 @@
|
|||
// disable "possible loss of data" to avoid hundreds of casts
|
||||
// we should just be careful :)
|
||||
#pragma warning(disable: 4244 4267)
|
||||
|
||||
// disable POSIX deprecation warnigns
|
||||
// these functions are never going away, anyway
|
||||
#pragma warning(disable: 4996)
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32)
|
||||
|
@ -103,6 +106,9 @@ typedef void * thread_ret_t;
#include <sys/stat.h>
#include <unistd.h>

#endif
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512

@ -192,8 +198,14 @@ typedef void * thread_ret_t;
#define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
if (size == 0) {
GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
return NULL;
}
void * aligned_memory = NULL;
#ifdef GGML_USE_METAL
#ifdef GGML_USE_CPU_HBM
int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif GGML_USE_METAL
int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);

@ -215,8 +227,12 @@ inline static void * ggml_aligned_malloc(size_t size) {
return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr) if(NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr) free(ptr)
#endif
#endif

#define UNUSED GGML_UNUSED
#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
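The two hunks above have to stay in sync: when GGML_USE_CPU_HBM is defined, GGML_ALIGNED_MALLOC goes through memkind's hbw_posix_memalign, so GGML_ALIGNED_FREE must release with hbw_free rather than free. A simplified standalone C++ sketch of that pairing follows; USE_HBM and ALIGN are illustrative stand-ins for the project's GGML_USE_CPU_HBM and GGML_MEM_ALIGN, not names from the diff:

    #include <cstdio>
    #include <cstdlib>
    #ifdef USE_HBM
    #include <hbwmalloc.h>
    #endif

    #define ALIGN 16

    static void * aligned_alloc_impl(size_t size) {
        void * mem = NULL;
    #ifdef USE_HBM
        int result = hbw_posix_memalign(&mem, ALIGN, size); // high-bandwidth memory pool
    #else
        int result = posix_memalign(&mem, ALIGN, size);     // regular aligned allocation
    #endif
        return result == 0 ? mem : NULL;
    }

    static void aligned_free_impl(void * ptr) {
    #ifdef USE_HBM
        if (ptr != NULL) hbw_free(ptr); // hbw allocations must not be passed to free()
    #else
        free(ptr);
    #endif
    }

    int main() {
        void * p = aligned_alloc_impl(1024);
        printf("allocated %p\n", p);
        aligned_free_impl(p);
        return 0;
    }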
@ -294,12 +310,14 @@ typedef double ggml_float;
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>

@ -4566,6 +4584,11 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
return NULL;
}

// allow to call ggml_init with 0 size
if (params.mem_size == 0) {
params.mem_size = GGML_MEM_ALIGN;
}

const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);

*ctx = (struct ggml_context) {
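The ggml_init hunk above makes a zero-byte request legal by promoting it to GGML_MEM_ALIGN, so callers that only need a context no longer have to invent a size. A minimal hedged usage sketch, assuming the ggml.h entry points visible in this diff:

    #include "ggml.h"
    #include <cstdio>

    int main() {
        // mem_size == 0 is now bumped internally to GGML_MEM_ALIGN
        struct ggml_init_params params = { /*.mem_size   =*/ 0,
                                           /*.mem_buffer =*/ NULL,
                                           /*.no_alloc   =*/ false };
        struct ggml_context * ctx = ggml_init(params);
        if (ctx == NULL) {
            fprintf(stderr, "ggml_init failed\n");
            return 1;
        }
        printf("ggml context ready, used mem: %zu bytes\n", ggml_used_mem(ctx));
        ggml_free(ctx);
        return 0;
    }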
@ -4768,7 +4791,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(

size_t obj_alloc_size = 0;

if (view_src == NULL && ctx->no_alloc == false) {
if (view_src == NULL && !ctx->no_alloc) {
if (ctx->scratch.data != NULL) {
// allocate tensor data in the scratch buffer
if (ctx->scratch.offs + data_size > ctx->scratch.size) {

@ -5469,7 +5492,7 @@ static struct ggml_tensor * ggml_mul_impl(
}

if (inplace) {
GGML_ASSERT(is_node == false);
GGML_ASSERT(!is_node);
}

struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

@ -5512,7 +5535,7 @@ static struct ggml_tensor * ggml_div_impl(
}

if (inplace) {
GGML_ASSERT(is_node == false);
GGML_ASSERT(!is_node);
}

struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

@ -18854,7 +18877,6 @@ static enum ggml_opt_result linesearch_backtracking(
// strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
return count;
}
return count;
}
}

@ -19957,7 +19979,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p

struct ggml_tensor * data = NULL;

if (params.no_alloc == false) {
if (!params.no_alloc) {
data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);

ok = ok && data != NULL;

@ -19998,7 +20020,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
}

// point the data member to the appropriate location in the binary blob using the tensor infos
if (params.no_alloc == false) {
if (!params.no_alloc) {
//cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
}
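For reference, the no_alloc branch touched above is the default loading path: with no_alloc == false, gguf_init_from_file() allocates one backing I8 tensor for the whole data blob and points each tensor's data field into it at infos[i].offset. A hedged usage sketch; the file name is a placeholder and the gguf API is assumed to match the ggml.h of this revision:

    #include "ggml.h"
    #include <cstdio>

    int main() {
        struct ggml_context * ctx_data = NULL;
        struct gguf_init_params params = { /*.no_alloc =*/ false,
                                           /*.ctx      =*/ &ctx_data };

        struct gguf_context * gctx = gguf_init_from_file("model.gguf", params);
        if (gctx == NULL) {
            fprintf(stderr, "failed to load gguf file\n");
            return 1;
        }

        // tensor data now lives inside the blob owned by ctx_data
        printf("loaded %d tensors\n", gguf_get_n_tensors(gctx));

        gguf_free(gctx);
        ggml_free(ctx_data);
        return 0;
    }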
58 llama.cpp

@ -1,8 +1,3 @@
// Defines fileno on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "llama.h"

#include "ggml.h"

@ -126,6 +121,9 @@ void replace_all(std::string & s, const std::string & search, const std::string
}
s = std::move(result);
}
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

static void zeros(std::ofstream & file, size_t n) {
char zero = 0;

@ -450,6 +448,9 @@ static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph *
#elif GGML_USE_METAL
# define llama_host_malloc(n) ggml_metal_host_malloc(n)
# define llama_host_free(data) ggml_metal_host_free(data)
#elif GGML_USE_CPU_HBM
# define llama_host_malloc(n) hbw_malloc(n)
# define llama_host_free(data) if (data != NULL) hbw_free(data)
#else
# define llama_host_malloc(n) malloc(n)
# define llama_host_free(data) free(data)

@ -1489,7 +1490,11 @@ struct llama_model_loader {
// allocate temp buffer if not using mmap
if (!use_mmap && cur->data == NULL) {
GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
cur->data = malloc(ggml_nbytes(cur));
#ifdef GGML_USE_CPU_HBM
cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
#else
cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
#endif
}

load_data_for(cur);
@ -3052,33 +3057,10 @@ static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
}

static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token id) {
return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
}

static bool llama_is_unused_token(const llama_vocab & vocab, llama_token id) {
return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNUSED;
}

static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
}

static bool llama_is_bos_token(const llama_vocab & vocab, llama_token id) {
GGML_ASSERT(llama_is_control_token(vocab, id));
return id == vocab.special_bos_id;
}

static bool llama_is_eos_token(const llama_vocab & vocab, llama_token id ) {
GGML_ASSERT(llama_is_control_token(vocab, id));
return id == vocab.special_eos_id;
}

static bool llama_is_pad_token(const llama_vocab & vocab, llama_token id ) {
GGML_ASSERT(id < 0 || llama_is_control_token(vocab, id));
return id == vocab.special_pad_id;
}

static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
GGML_ASSERT(llama_is_byte_token(vocab, id));
const auto& token_data = vocab.id_to_token.at(id);

@ -4800,9 +4782,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
std::vector<std::thread> workers;
std::mutex mutex;

#ifdef GGML_USE_K_QUANTS
auto use_more_bits = [] (int i_layer, int num_layers) -> bool {
return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
};
#endif

int idx = 0;
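The use_more_bits lambda above encodes the k-quants mixing schedule: the first and last eighth of the layers always get the higher-bit type, and in between every third layer (offset by two from num_layers/8) does as well. A quick standalone check of which layers that selects for a hypothetical 32-layer model:

    #include <cstdio>

    // same predicate as the lambda in the hunk above
    static bool use_more_bits(int i_layer, int num_layers) {
        return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 ||
               (i_layer - num_layers/8)%3 == 2;
    }

    int main() {
        const int n_layers = 32; // illustrative layer count, not from the diff
        for (int i = 0; i < n_layers; ++i) {
            if (use_more_bits(i, n_layers)) {
                printf("%d ", i);
            }
        }
        printf("\n"); // prints: 0 1 2 3 6 9 12 15 18 21 24 27 28 29 30 31
        return 0;
    }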
@ -5649,15 +5633,19 @@ void llama_free(struct llama_context * ctx) {
}

int llama_n_vocab(const struct llama_context * ctx) {
return ctx->model.vocab.id_to_token.size();
return llama_model_n_vocab(&ctx->model);
}

int llama_n_ctx(const struct llama_context * ctx) {
return ctx->model.hparams.n_ctx;
return llama_model_n_ctx(&ctx->model);
}

int llama_n_ctx_train(const struct llama_context * ctx) {
return llama_model_n_ctx_train(&ctx->model);
}

int llama_n_embd(const struct llama_context * ctx) {
return ctx->model.hparams.n_embd;
return llama_model_n_embd(&ctx->model);
}

enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx) {

@ -5672,6 +5660,10 @@ int llama_model_n_ctx(const struct llama_model * model) {
return model->hparams.n_ctx;
}

int llama_model_n_ctx_train(const struct llama_model * model) {
return model->hparams.n_ctx_train;
}

int llama_model_n_embd(const struct llama_model * model) {
return model->hparams.n_embd;
}

@ -5947,7 +5939,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
rng_ss.str(std::string(&rng_buf[0], rng_size));
rng_ss >> ctx->rng;

GGML_ASSERT(rng_ss.fail() == false);
GGML_ASSERT(!rng_ss.fail());
}

// set logits
14 llama.h

@ -245,15 +245,17 @@ extern "C" {
LLAMA_API bool llama_mmap_supported (void);
LLAMA_API bool llama_mlock_supported(void);

LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
LLAMA_API int llama_n_embd (const struct llama_context * ctx);
LLAMA_API int llama_n_vocab (const struct llama_context * ctx);
LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
LLAMA_API int llama_n_ctx_train(const struct llama_context * ctx);
LLAMA_API int llama_n_embd (const struct llama_context * ctx);

LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx);

LLAMA_API int llama_model_n_vocab(const struct llama_model * model);
LLAMA_API int llama_model_n_ctx (const struct llama_model * model);
LLAMA_API int llama_model_n_embd (const struct llama_model * model);
LLAMA_API int llama_model_n_vocab (const struct llama_model * model);
LLAMA_API int llama_model_n_ctx (const struct llama_model * model);
LLAMA_API int llama_model_n_ctx_train(const struct llama_model * model);
LLAMA_API int llama_model_n_embd (const struct llama_model * model);

// Get a string describing the model type
LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
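A hedged usage sketch of the extended getters declared above: n_ctx_train (the context length the model was trained with) can now be queried either per context or per model. The loader calls and the model path are assumptions based on the llama.h of this revision, not part of the diff:

    #include "llama.h"
    #include <cstdio>

    int main() {
        llama_context_params cparams = llama_context_default_params();

        llama_model * model = llama_load_model_from_file("model.gguf", cparams);
        if (model == NULL) {
            return 1;
        }
        llama_context * ctx = llama_new_context_with_model(model, cparams);

        printf("n_vocab     = %d\n", llama_n_vocab(ctx));
        printf("n_ctx       = %d\n", llama_n_ctx(ctx));
        printf("n_ctx_train = %d\n", llama_n_ctx_train(ctx));    // new in this change
        printf("n_embd      = %d\n", llama_model_n_embd(model)); // model-level variant

        llama_free(ctx);
        llama_free_model(model);
        return 0;
    }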
@ -76,7 +76,7 @@ void * align_with_offset(void * ptr, int offset) {
return (char *) std::align(MAX_ALIGNMENT, MAX_ALIGNMENT, ptr, dummy_size) + offset;
}

void benchmark_function(size_t size, size_t q_size, int64_t iterations, std::function<size_t(void)> function) {
void benchmark_function(size_t size, size_t q_size, int64_t iterations, const std::function<size_t(void)> & function) {
int64_t min_time_us = INT64_MAX;
int64_t total_time_us = 0;
int64_t min_time_cycles = INT64_MAX;
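The signature change above passes the std::function by const reference: copying a std::function may heap-allocate a fresh copy of its captured state on every call to benchmark_function, which is pure overhead in a benchmarking helper. A small sketch of the difference; the names are illustrative:

    #include <cstdio>
    #include <functional>

    static void run_by_value(std::function<size_t(void)> f)          { f(); }
    static void run_by_cref (const std::function<size_t(void)> & f)  { f(); }

    int main() {
        size_t counter = 0;
        std::function<size_t(void)> fn = [&counter]() -> size_t { return ++counter; };

        run_by_value(fn); // copies fn (and its capture) before calling it
        run_by_cref(fn);  // binds to the existing object, no copy

        printf("counter = %zu\n", counter);
        return 0;
    }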