Merge remote-tracking branch 'upstream/master' into check-c-compliance

Cebtenzzre 2023-08-29 11:26:51 -04:00
commit f82db06ab5
38 changed files with 2946 additions and 2687 deletions

View file

@@ -13,12 +13,13 @@
# It is up to the user to install the correct vendor-specific support.
Name: llama.cpp-clblast
-Version: master
+Version: %( date "+%%Y%%m%%d" )
Release: 1%{?dist}
-Summary: OpenCL Inference of LLaMA model in pure C/C++
+Summary: OpenCL Inference of LLaMA model in C/C++
License: MIT
Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
-BuildRequires: coreutils make gcc-c++ git mesa-libOpenCL-devel
+BuildRequires: coreutils make gcc-c++ git mesa-libOpenCL-devel clblast-devel
Requires: clblast
URL: https://github.com/ggerganov/llama.cpp
%define debug_package %{nil}
@@ -35,18 +36,43 @@ make -j LLAMA_CLBLAST=1
%install
mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llamacppclblast
+cp -p main %{buildroot}%{_bindir}/llamaclblast
-cp -p server %{buildroot}%{_bindir}/llamacppclblastserver
+cp -p server %{buildroot}%{_bindir}/llamaclblastserver
-cp -p simple %{buildroot}%{_bindir}/llamacppclblastsimple
+cp -p simple %{buildroot}%{_bindir}/llamaclblastsimple
mkdir -p %{buildroot}/usr/lib/systemd/system
%{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamaclblast.service
[Unit]
Description=Llama.cpp server, CPU only (no GPU support in this build).
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
[Service]
Type=simple
EnvironmentFile=/etc/sysconfig/llama
ExecStart=/usr/bin/llamaclblastserver $LLAMA_ARGS
ExecReload=/bin/kill -s HUP $MAINPID
Restart=never
[Install]
WantedBy=default.target
EOF
mkdir -p %{buildroot}/etc/sysconfig
%{__cat} <<EOF > %{buildroot}/etc/sysconfig/llama
LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
EOF
%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*
%files
-%{_bindir}/llamacppclblast
+%{_bindir}/llamaclblast
-%{_bindir}/llamacppclblastserver
+%{_bindir}/llamaclblastserver
-%{_bindir}/llamacppclblastsimple
+%{_bindir}/llamaclblastsimple
/usr/lib/systemd/system/llamaclblast.service
%config /etc/sysconfig/llama
%pre

View file

@@ -13,7 +13,7 @@
# It is up to the user to install the correct vendor-specific support.
Name: llama.cpp-cublas
-Version: master
+Version: %( date "+%%Y%%m%%d" )
Release: 1%{?dist}
Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
License: MIT
@@ -40,6 +40,28 @@ cp -p main %{buildroot}%{_bindir}/llamacppcublas
cp -p server %{buildroot}%{_bindir}/llamacppcublasserver
cp -p simple %{buildroot}%{_bindir}/llamacppcublassimple
mkdir -p %{buildroot}/usr/lib/systemd/system
%{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamacublas.service
[Unit]
Description=Llama.cpp server, CPU only (no GPU support in this build).
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
[Service]
Type=simple
EnvironmentFile=/etc/sysconfig/llama
ExecStart=/usr/bin/llamacppcublasserver $LLAMA_ARGS
ExecReload=/bin/kill -s HUP $MAINPID
Restart=never
[Install]
WantedBy=default.target
EOF
mkdir -p %{buildroot}/etc/sysconfig
%{__cat} <<EOF > %{buildroot}/etc/sysconfig/llama
LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
EOF
%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*
@@ -48,6 +70,8 @@ rm -rf %{_builddir}/*
%{_bindir}/llamacppcublas
%{_bindir}/llamacppcublasserver
%{_bindir}/llamacppcublassimple
/usr/lib/systemd/system/llamacublas.service
%config /etc/sysconfig/llama
%pre

View file

@@ -6,6 +6,7 @@
# Notes for llama.cpp:
# 1. Tags are currently based on hash - which will not sort asciibetically.
# We need to declare standard versioning if people want to sort latest releases.
# In the meantime, YYYYMMDD format will be used.
# 2. Builds for CUDA/OpenCL support are separate, with different depenedencies.
# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc installed.
# Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
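As a purely illustrative aside (an editor's sketch, not part of the committed spec): assuming a build on the commit date shown above and a hypothetical Fedora 38 dist tag, the date-based versioning introduced below would evaluate roughly as:

    %( date "+%%Y%%m%%d" )           ->  20230829
    %{name}-%{version}-%{release}    ->  llama.cpp-20230829-1.fc38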
@@ -13,12 +14,13 @@
# It is up to the user to install the correct vendor-specific support.
Name: llama.cpp
-Version: master
+Version: %( date "+%%Y%%m%%d" )
Release: 1%{?dist}
Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
License: MIT
Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
-BuildRequires: coreutils make gcc-c++ git
+BuildRequires: coreutils make gcc-c++ git libstdc++-devel
Requires: libstdc++
URL: https://github.com/ggerganov/llama.cpp
%define debug_package %{nil}
@@ -26,27 +28,52 @@ URL: https://github.com/ggerganov/llama.cpp
%description
CPU inference for Meta's Lllama2 models using default options.
Models are not included in this package and must be downloaded separately.
%prep
-%autosetup
+%setup -n llama.cpp-master
%build
make -j
%install
mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llamacpp
+cp -p main %{buildroot}%{_bindir}/llama
-cp -p server %{buildroot}%{_bindir}/llamacppserver
+cp -p server %{buildroot}%{_bindir}/llamaserver
-cp -p simple %{buildroot}%{_bindir}/llamacppsimple
+cp -p simple %{buildroot}%{_bindir}/llamasimple
mkdir -p %{buildroot}/usr/lib/systemd/system
%{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llama.service
[Unit]
Description=Llama.cpp server, CPU only (no GPU support in this build).
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
[Service]
Type=simple
EnvironmentFile=/etc/sysconfig/llama
ExecStart=/usr/bin/llamaserver $LLAMA_ARGS
ExecReload=/bin/kill -s HUP $MAINPID
Restart=never
[Install]
WantedBy=default.target
EOF
mkdir -p %{buildroot}/etc/sysconfig
%{__cat} <<EOF > %{buildroot}/etc/sysconfig/llama
LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
EOF
%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*
%files
-%{_bindir}/llamacpp
+%{_bindir}/llama
-%{_bindir}/llamacppserver
+%{_bindir}/llamaserver
-%{_bindir}/llamacppsimple
+%{_bindir}/llamasimple
/usr/lib/systemd/system/llama.service
%config /etc/sysconfig/llama
%pre

.gitignore (vendored, 5 changes)
View file

@@ -63,10 +63,13 @@ poetry.toml
# Test binaries
tests/test-grammar-parser
tests/test-llama-grammar
tests/test-double-float
tests/test-grad0
tests/test-opt
tests/test-quantize-fns
tests/test-quantize-perf
tests/test-sampling
-tests/test-tokenizer-0
+tests/test-tokenizer-0-llama
tests/test-tokenizer-0-falcon
tests/test-tokenizer-1

View file

@@ -301,7 +301,7 @@ if (LLAMA_METAL)
set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)
add_compile_definitions(GGML_USE_METAL)
-add_compile_definitions(GGML_METAL_NDEBUG)
+#add_compile_definitions(GGML_METAL_NDEBUG)
# get full path to the file
#add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/")

View file

@@ -1,8 +1,8 @@
# Define the default target now so that it is always the first target
-BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf llama-bench tests/test-c.o
+BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam_search tests/test-c.o
# Binaries only useful for tests
-TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0
+TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1
default: $(BUILD_TARGETS)
@@ -305,7 +305,7 @@ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
endif # LLAMA_HIPBLAS
ifdef LLAMA_METAL
-CFLAGS += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
+CFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG
CXXFLAGS += -DGGML_USE_METAL
LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
OBJS += ggml-metal.o
@@ -356,7 +356,7 @@ OBJS += ggml-alloc.o
llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h
$(CXX) $(CXXFLAGS) -c $< -o $@
-common.o: common/common.cpp common/common.h
+common.o: common/common.cpp common/common.h build-info.h
$(CXX) $(CXXFLAGS) -c $< -o $@
console.o: common/console.cpp common/console.h
@@ -369,7 +369,7 @@ libllama.so: llama.o ggml.o $(OBJS)
$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
clean:
-rm -vf *.o tests/*.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch convert-llama2c-to-ggml embd-input-test gguf llama-bench build-info.h $(TEST_TARGETS)
+rm -vf *.o tests/*.o *.so *.dll benchmark-matmult build-info.h $(BUILD_TARGETS) $(TEST_TARGETS)
#
# Examples
@@ -409,18 +409,33 @@ $(LIB_PRE)embdinput$(DSO_EXT): examples/embd-input/embd-input.h examples/embd-in
embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-test.cpp build-info.h ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %$(DSO_EXT),$(filter-out %.h,$(filter-out %.hpp,$^))) -o $@ $(LDFLAGS) -L. -lembdinput
-gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o $(OBJS)
+gguf: examples/gguf/gguf.cpp ggml.o llama.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
-train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
-convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp build-info.h ggml.o llama.o $(OBJS)
+convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp ggml.o llama.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
llama-bench: examples/llama-bench/llama-bench.cpp build-info.h ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
beam_search: examples/beam_search/beam_search.cpp build-info.h ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
ifneq '' '$(or $(filter clean,$(MAKECMDGOALS)),$(LLAMA_METAL))'
BUILD_TARGETS += metal
endif
ifdef LLAMA_METAL
metal: examples/metal/metal.cpp ggml.o $(OBJS)
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
endif
build-info.h: $(wildcard .git/index) scripts/build-info.sh
@sh scripts/build-info.sh > $@.tmp
@if ! cmp -s $@.tmp $@; then \
@@ -442,32 +457,38 @@ benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o
vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
-tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o common.o grammar-parser.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
-tests/test-grammar-parser: tests/test-grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-grammar-parser: tests/test-grammar-parser.cpp build-info.h ggml.o llama.o common.o grammar-parser.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-grad0: tests/test-grad0.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-opt: tests/test-opt.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-quantize-fns: tests/test-quantize-fns.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-quantize-perf: tests/test-quantize-perf.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-sampling: tests/test-sampling.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
-tests/test-tokenizer-0: tests/test-tokenizer-0.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-tokenizer-0-falcon: tests/test-tokenizer-0-falcon.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp build-info.h ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-tokenizer-1: tests/test-tokenizer-1.cpp build-info.h ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
tests/test-c.o: tests/test-c.c llama.h
$(CC) $(CFLAGS) -Werror=implicit-int -c $(filter-out %.h,$^) -o $@

View file

@@ -113,6 +113,7 @@ as the main playground for developing new features for the [ggml](https://github
- C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp)
- Scala 3: [donderom/llm4s](https://github.com/donderom/llm4s)
- Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj)
- React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn)
**UI:**

View file

@@ -1,15 +1,21 @@
#include "common.h"
#include "build-info.h"
#include "llama.h"
#include <cassert>
#include <iostream>
#include <cstring>
#include <fstream>
#include <string>
#include <iterator>
#include <algorithm>
-#include <sstream>
+#include <cassert>
-#include <unordered_set>
+#include <cmath>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iterator>
#include <iostream>
#include <regex>
#include <sstream>
#include <string>
#include <unordered_set>
#include <vector>
#include <cinttypes>
#if defined(__APPLE__) && defined(__MACH__)
#include <sys/types.h>
@@ -19,11 +25,14 @@
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <codecvt>
#include <locale>
#include <windows.h>
#include <fcntl.h>
#include <io.h>
#else
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#endif
@@ -93,7 +102,6 @@ void process_escapes(std::string& input) {
bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
bool invalid_param = false;
-bool escape_prompt = false;
std::string arg;
gpt_params default_params;
const std::string arg_prefix = "--";
@@ -125,8 +133,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
break;
}
params.prompt = argv[i];
-} else if (arg == "-e") {
+} else if (arg == "-e" || arg == "--escape") {
-escape_prompt = true;
+params.escape = true;
} else if (arg == "--prompt-cache") {
if (++i >= argc) {
invalid_param = true;
@@ -415,6 +423,16 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
break;
}
params.antiprompt.push_back(argv[i]);
} else if (arg == "-ld" || arg == "--logdir") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.logdir = argv[i];
if (params.logdir.back() != DIRECTORY_SEPARATOR) {
params.logdir += DIRECTORY_SEPARATOR;
}
} else if (arg == "--perplexity") {
params.perplexity = true;
} else if (arg == "--ppl-stride") {
@@ -520,7 +538,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
exit(1);
}
-if (escape_prompt) {
+if (params.escape) {
process_escapes(params.prompt);
process_escapes(params.input_prefix);
process_escapes(params.input_suffix);
@@ -546,7 +564,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stdout, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
fprintf(stdout, " -p PROMPT, --prompt PROMPT\n");
fprintf(stdout, " prompt to start generation with (default: empty)\n");
-fprintf(stdout, " -e process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
+fprintf(stdout, " -e, --escape process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
fprintf(stdout, " --prompt-cache FNAME file to cache prompt state for faster startup (default: none)\n");
fprintf(stdout, " --prompt-cache-all if specified, saves user input and generations to cache as well.\n");
fprintf(stdout, " not supported with --interactive or other interactive options\n");
@@ -627,6 +645,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stdout, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
fprintf(stdout, " -m FNAME, --model FNAME\n");
fprintf(stdout, " model path (default: %s)\n", params.model.c_str());
fprintf(stdout, " -ld LOGDIR, --logdir LOGDIR\n");
fprintf(stdout, " path under which to save YAML logs (no logging if unset)\n");
fprintf(stdout, "\n");
}
@@ -779,3 +799,289 @@ std::string llama_detokenize_bpe(llama_context * ctx, const std::vector<llama_to
return result;
}
// returns true if successful, false otherwise
bool create_directory_with_parents(const std::string & path) {
#ifdef _WIN32
std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
std::wstring wpath = converter.from_bytes(path);
// if the path already exists, check whether it's a directory
const DWORD attributes = GetFileAttributesW(wpath.c_str());
if ((attributes != INVALID_FILE_ATTRIBUTES) && (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
return true;
}
size_t pos_slash = 0;
// process path from front to back, procedurally creating directories
while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) {
const std::wstring subpath = wpath.substr(0, pos_slash);
const wchar_t * test = subpath.c_str();
const bool success = CreateDirectoryW(test, NULL);
if (!success) {
const DWORD error = GetLastError();
// if the path already exists, ensure that it's a directory
if (error == ERROR_ALREADY_EXISTS) {
const DWORD attributes = GetFileAttributesW(subpath.c_str());
if (attributes == INVALID_FILE_ATTRIBUTES || !(attributes & FILE_ATTRIBUTE_DIRECTORY)) {
return false;
}
} else {
return false;
}
}
pos_slash += 1;
}
return true;
#else
// if the path already exists, check whether it's a directory
struct stat info;
if (stat(path.c_str(), &info) == 0) {
return S_ISDIR(info.st_mode);
}
size_t pos_slash = 1; // skip leading slashes for directory creation
// process path from front to back, procedurally creating directories
while ((pos_slash = path.find('/', pos_slash)) != std::string::npos) {
const std::string subpath = path.substr(0, pos_slash);
struct stat info;
// if the path already exists, ensure that it's a directory
if (stat(subpath.c_str(), &info) == 0) {
if (!S_ISDIR(info.st_mode)) {
return false;
}
} else {
// create parent directories
const int ret = mkdir(subpath.c_str(), 0755);
if (ret != 0) {
return false;
}
}
pos_slash += 1;
}
return true;
#endif // _WIN32
}
void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data) {
if (data.empty()) {
fprintf(stream, "%s:\n", prop_name);
return;
}
fprintf(stream, "%s: [", prop_name);
for (size_t i = 0; i < data.size() - 1; ++i) {
fprintf(stream, "%e, ", data[i]);
}
fprintf(stream, "%e]\n", data.back());
}
void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data) {
if (data.empty()) {
fprintf(stream, "%s:\n", prop_name);
return;
}
fprintf(stream, "%s: [", prop_name);
for (size_t i = 0; i < data.size() - 1; ++i) {
fprintf(stream, "%d, ", data[i]);
}
fprintf(stream, "%d]\n", data.back());
}
void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data) {
std::string data_str(data == NULL ? "" : data);
if (data_str.empty()) {
fprintf(stream, "%s:\n", prop_name);
return;
}
size_t pos_start = 0;
size_t pos_found = 0;
if (!data_str.empty() && (std::isspace(data_str[0]) || std::isspace(data_str.back()))) {
data_str = std::regex_replace(data_str, std::regex("\n"), "\\n");
data_str = std::regex_replace(data_str, std::regex("\""), "\\\"");
data_str = "\"" + data_str + "\"";
fprintf(stream, "%s: %s\n", prop_name, data_str.c_str());
return;
}
if (data_str.find('\n') == std::string::npos) {
fprintf(stream, "%s: %s\n", prop_name, data_str.c_str());
return;
}
fprintf(stream, "%s: |\n", prop_name);
while ((pos_found = data_str.find('\n', pos_start)) != std::string::npos) {
fprintf(stream, " %s\n", data_str.substr(pos_start, pos_found-pos_start).c_str());
pos_start = pos_found + 1;
}
}
std::string get_sortable_timestamp() {
using clock = std::chrono::system_clock;
const clock::time_point current_time = clock::now();
const time_t as_time_t = clock::to_time_t(current_time);
char timestamp_no_ns[100];
std::strftime(timestamp_no_ns, 100, "%Y_%m_%d-%H_%M_%S", std::localtime(&as_time_t));
const int64_t ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
current_time.time_since_epoch() % 1000000000).count();
char timestamp_ns[11];
snprintf(timestamp_ns, 11, "%09" PRId64, ns);
return std::string(timestamp_no_ns) + "." + std::string(timestamp_ns);
}
void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const llama_context * lctx,
const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc) {
fprintf(stream, "build_commit: %s\n", BUILD_COMMIT);
fprintf(stream, "build_number: %d\n", BUILD_NUMBER);
fprintf(stream, "cpu_has_arm_fma: %s\n", ggml_cpu_has_arm_fma() ? "true" : "false");
fprintf(stream, "cpu_has_avx: %s\n", ggml_cpu_has_avx() ? "true" : "false");
fprintf(stream, "cpu_has_avx2: %s\n", ggml_cpu_has_avx2() ? "true" : "false");
fprintf(stream, "cpu_has_avx512: %s\n", ggml_cpu_has_avx512() ? "true" : "false");
fprintf(stream, "cpu_has_avx512_vbmi: %s\n", ggml_cpu_has_avx512_vbmi() ? "true" : "false");
fprintf(stream, "cpu_has_avx512_vnni: %s\n", ggml_cpu_has_avx512_vnni() ? "true" : "false");
fprintf(stream, "cpu_has_blas: %s\n", ggml_cpu_has_blas() ? "true" : "false");
fprintf(stream, "cpu_has_cublas: %s\n", ggml_cpu_has_cublas() ? "true" : "false");
fprintf(stream, "cpu_has_clblast: %s\n", ggml_cpu_has_clblast() ? "true" : "false");
fprintf(stream, "cpu_has_fma: %s\n", ggml_cpu_has_fma() ? "true" : "false");
fprintf(stream, "cpu_has_gpublas: %s\n", ggml_cpu_has_gpublas() ? "true" : "false");
fprintf(stream, "cpu_has_neon: %s\n", ggml_cpu_has_neon() ? "true" : "false");
fprintf(stream, "cpu_has_f16c: %s\n", ggml_cpu_has_f16c() ? "true" : "false");
fprintf(stream, "cpu_has_fp16_va: %s\n", ggml_cpu_has_fp16_va() ? "true" : "false");
fprintf(stream, "cpu_has_wasm_simd: %s\n", ggml_cpu_has_wasm_simd() ? "true" : "false");
fprintf(stream, "cpu_has_blas: %s\n", ggml_cpu_has_blas() ? "true" : "false");
fprintf(stream, "cpu_has_sse3: %s\n", ggml_cpu_has_sse3() ? "true" : "false");
fprintf(stream, "cpu_has_vsx: %s\n", ggml_cpu_has_vsx() ? "true" : "false");
#ifdef NDEBUG
fprintf(stream, "debug: false\n");
#else
fprintf(stream, "debug: true\n");
#endif // NDEBUG
fprintf(stream, "model_desc: %s\n", model_desc);
fprintf(stream, "n_vocab: %d # output size of the final layer, 32001 for some models\n", llama_n_vocab(lctx));
#ifdef __OPTIMIZE__
fprintf(stream, "optimize: true\n");
#else
fprintf(stream, "optimize: false\n");
#endif // __OPTIMIZE__
fprintf(stream, "time: %s\n", timestamp.c_str());
fprintf(stream, "\n");
fprintf(stream, "###############\n");
fprintf(stream, "# User Inputs #\n");
fprintf(stream, "###############\n");
fprintf(stream, "\n");
fprintf(stream, "alias: %s # default: unknown\n", params.model_alias.c_str());
fprintf(stream, "batch_size: %d # default: 512\n", params.n_batch);
dump_string_yaml_multiline(stream, "cfg_negative_prompt", params.cfg_negative_prompt.c_str());
fprintf(stream, "cfg_scale: %f # default: 1.0\n", params.cfg_scale);
fprintf(stream, "chunks: %d # default: -1 (unlimited)\n", params.n_chunks);
fprintf(stream, "color: %s # default: false\n", params.use_color ? "true" : "false");
fprintf(stream, "ctx_size: %d # default: 512\n", params.n_ctx);
fprintf(stream, "escape: %s # default: false\n", params.escape ? "true" : "false");
fprintf(stream, "export: %s # default: false\n", params.export_cgraph ? "true" : "false");
fprintf(stream, "file: # never logged, see prompt instead. Can still be specified for input.\n");
fprintf(stream, "frequency_penalty: %f # default: 0.0 \n", params.frequency_penalty);
dump_string_yaml_multiline(stream, "grammar", params.grammar.c_str());
fprintf(stream, "grammar-file: # never logged, see grammar instead. Can still be specified for input.\n");
fprintf(stream, "hellaswag: %s # default: false\n", params.hellaswag ? "true" : "false");
fprintf(stream, "hellaswag_tasks: %ld # default: 400\n", params.hellaswag_tasks);
const auto logit_bias_eos = params.logit_bias.find(llama_token_eos(lctx));
const bool ignore_eos = logit_bias_eos != params.logit_bias.end() && logit_bias_eos->second == -INFINITY;
fprintf(stream, "ignore_eos: %s # default: false\n", ignore_eos ? "true" : "false");
dump_string_yaml_multiline(stream, "in_prefix", params.input_prefix.c_str());
fprintf(stream, "in_prefix_bos: %s # default: false\n", params.input_prefix_bos ? "true" : "false");
dump_string_yaml_multiline(stream, "in_suffix", params.input_prefix.c_str());
fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());
fprintf(stream, "logit_bias:\n");
for (std::pair<llama_token, float> lb : params.logit_bias) {
if (ignore_eos && lb.first == logit_bias_eos->first) {
continue;
}
fprintf(stream, " %d: %f", lb.first, lb.second);
}
fprintf(stream, "lora: %s\n", params.lora_adapter.c_str());
fprintf(stream, "lora_base: %s\n", params.lora_base.c_str());
fprintf(stream, "low_vram: %s # default: false\n", params.low_vram ? "true" : "false");
fprintf(stream, "main_gpu: %d # default: 0\n", params.main_gpu);
fprintf(stream, "memory_f32: %s # default: false\n", !params.memory_f16 ? "true" : "false");
fprintf(stream, "mirostat: %d # default: 0 (disabled)\n", params.mirostat);
fprintf(stream, "mirostat_ent: %f # default: 5.0\n", params.mirostat_tau);
fprintf(stream, "mirostat_lr: %f # default: 0.1\n", params.mirostat_eta);
fprintf(stream, "mlock: %s # default: false\n", params.use_mlock ? "true" : "false");
fprintf(stream, "model: %s # default: models/7B/ggml-model.bin\n", params.model.c_str());
fprintf(stream, "mtest: %s # default: false\n", params.mem_test ? "true" : "false");
fprintf(stream, "multiline_input: %s # default: false\n", params.multiline_input ? "true" : "false");
fprintf(stream, "n_gpu_layers: %d # default: 0\n", params.n_gpu_layers);
fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);
fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", params.n_probs);
fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false");
fprintf(stream, "no_penalize_nl: %s # default: false\n", !params.penalize_nl ? "true" : "false");
fprintf(stream, "numa: %s # default: false\n", params.numa ? "true" : "false");
fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
fprintf(stream, "presence_penalty: %f # default: 0.0\n", params.presence_penalty);
dump_string_yaml_multiline(stream, "prompt", params.prompt.c_str());
fprintf(stream, "prompt_cache: %s\n", params.path_prompt_cache.c_str());
fprintf(stream, "prompt_cache_all: %s # default: false\n", params.prompt_cache_all ? "true" : "false");
fprintf(stream, "prompt_cache_ro: %s # default: false\n", params.prompt_cache_ro ? "true" : "false");
dump_vector_int_yaml(stream, "prompt_tokens", prompt_tokens);
fprintf(stream, "random_prompt: %s # default: false\n", params.random_prompt ? "true" : "false");
fprintf(stream, "repeat_penalty: %f # default: 1.1\n", params.repeat_penalty);
fprintf(stream, "reverse_prompt:\n");
for (std::string ap : params.antiprompt) {
size_t pos = 0;
while ((pos = ap.find('\n', pos)) != std::string::npos) {
ap.replace(pos, 1, "\\n");
pos += 1;
}
fprintf(stream, " - %s\n", ap.c_str());
}
fprintf(stream, "rope_freq_base: %f # default: 10000.0\n", params.rope_freq_base);
fprintf(stream, "rope_freq_scale: %f # default: 1.0\n", params.rope_freq_scale);
fprintf(stream, "seed: %d # default: -1 (random seed)\n", params.seed);
fprintf(stream, "simple_io: %s # default: false\n", params.simple_io ? "true" : "false");
fprintf(stream, "temp: %f # default: 0.8\n", params.temp);
const std::vector<float> tensor_split_vector(params.tensor_split, params.tensor_split + LLAMA_MAX_DEVICES);
dump_vector_float_yaml(stream, "tensor_split", tensor_split_vector);
fprintf(stream, "tfs: %f # default: 1.0\n", params.tfs_z);
fprintf(stream, "threads: %d # default: %d\n", params.n_threads, std::thread::hardware_concurrency());
fprintf(stream, "top_k: %d # default: 40\n", params.top_k);
fprintf(stream, "top_p: %f # default: 0.95\n", params.top_p);
fprintf(stream, "typical_p: %f # default: 1.0\n", params.typical_p);
fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false");
}

View file

@@ -11,6 +11,12 @@
#include <unordered_map>
#include <tuple>
#ifdef _WIN32
#define DIRECTORY_SEPARATOR '\\'
#else
#define DIRECTORY_SEPARATOR '/'
#endif // _WIN32
//
// CLI argument parsing
//
@@ -61,6 +67,7 @@ struct gpt_params {
std::string input_suffix = ""; // string to suffix user inputs with
std::string grammar = ""; // optional BNF-like grammar to constrain sampling
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
std::string logdir = ""; // directory in which to save YAML log files
std::string lora_adapter = ""; // lora adapter path
std::string lora_base = ""; // base model path for the lora adapter
@@ -82,6 +89,7 @@ struct gpt_params {
bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
bool embedding = false; // get only sentence embedding
bool escape = false; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
bool interactive_first = false; // wait for user input immediately
bool multiline_input = false; // reverse the usage of `\`
bool simple_io = false; // improves compatibility with subprocesses and limited consoles
@@ -144,3 +152,13 @@ std::string llama_detokenize_spm(
std::string llama_detokenize_bpe(
llama_context * ctx,
const std::vector<llama_token> & tokens);
bool create_directory_with_parents(const std::string & path);
void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);
std::string get_sortable_timestamp();
void dump_non_result_info_yaml(
FILE * stream, const gpt_params & params, const llama_context * lctx,
const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);

View file

@@ -48,7 +48,7 @@ def count_model_parts(dir_model: str) -> int:
if len(sys.argv) < 3:
-print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
+print(f"Usage: python {sys.argv[0]} dir-model ftype\n")
print(" ftype == 0 -> float32")
print(" ftype == 1 -> float16")
sys.exit(1)

View file

@@ -50,7 +50,7 @@ def count_model_parts(dir_model: str) -> int:
if len(sys.argv) < 3:
-print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
+print(f"Usage: python {sys.argv[0]} dir-model ftype\n")
print(" ftype == 0 -> float32")
print(" ftype == 1 -> float16")
sys.exit(1)

View file

@@ -32,7 +32,7 @@ def count_model_parts(dir_model: str) -> int:
if len(sys.argv) < 3:
-print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
+print(f"Usage: python {sys.argv[0]} dir-model ftype\n")
print(" ftype == 0 -> float32")
print(" ftype == 1 -> float16")

View file

@@ -44,7 +44,7 @@ def count_model_parts(dir_model: str) -> int:
if len(sys.argv) < 3:
-print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
+print(f"Usage: python {sys.argv[0]} dir-model ftype\n")
print(" ftype == 0 -> float32")
print(" ftype == 1 -> float16")

View file

@@ -469,7 +469,7 @@ class UnquantizedTensor(Tensor):
def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
r = self.ndarray.shape[0] // 3
-return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head))
+return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head, n_head))
def part(self, n_part: int) -> 'UnquantizedTensor':
r = self.ndarray.shape[0] // 3
@@ -952,9 +952,10 @@ def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
#tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
print(f"Unpacking and permuting layer {i}")
-tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
+tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
-tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
+tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
del tmp[f"model.layers.{i}.self_attn.W_pack.weight"]
else:
break

View file

@@ -681,7 +681,6 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
// for rms-att-weight
int row_length = model->hparams.n_embd;
-const auto & hparams = model->hparams;
int n_ff = model->hparams.n_ff;
for (uint32_t i = 0; i < model->hparams.n_layer; ++i){

View file

@@ -0,0 +1,5 @@
set(TARGET gguf)
add_executable(${TARGET} gguf.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)

View file

@@ -3,6 +3,9 @@
#include <cassert>
#include <chrono>
#include <cinttypes>
#include <clocale>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iterator>
@@ -10,7 +13,6 @@
#include <numeric>
#include <regex>
#include <sstream>
-#include <stdio.h>
#include <string>
#include <vector>
@@ -916,6 +918,9 @@ static void llama_null_log_callback(enum llama_log_level level, const char * tex
}
int main(int argc, char ** argv) {
// try to set locale for unicode characters in markdown
setlocale(LC_CTYPE, ".UTF-8");
#if !defined(NDEBUG)
fprintf(stderr, "warning: asserts enabled, performance may be affected\n");
#endif

View file

@@ -17,6 +17,7 @@
#include <ctime>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
@@ -36,9 +37,57 @@
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
static llama_context ** g_ctx;
static llama_model ** g_model;
static gpt_params * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream * g_output_ss;
static std::vector<llama_token> * g_output_tokens;
static bool is_interacting = false;
void write_logfile(
const llama_context * ctx, const gpt_params & params, const llama_model * model,
const std::vector<llama_token> input_tokens, const std::string output, const std::vector<llama_token> output_tokens) {
if (params.logdir.empty()) {
return;
}
const std::string timestamp = get_sortable_timestamp();
const bool success = create_directory_with_parents(params.logdir);
if (!success) {
fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
__func__, params.logdir.c_str());
return;
}
const std::string logfile_path = params.logdir + timestamp + ".yml";
FILE * logfile = fopen(logfile_path.c_str(), "w");
if (logfile == NULL) {
fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
return;
}
fprintf(logfile, "binary: main\n");
char model_desc[128];
llama_model_desc(model, model_desc, sizeof(model_desc));
dump_non_result_info_yaml(logfile, params, ctx, timestamp, input_tokens, model_desc);
fprintf(logfile, "\n");
fprintf(logfile, "######################\n");
fprintf(logfile, "# Generation Results #\n");
fprintf(logfile, "######################\n");
fprintf(logfile, "\n");
dump_string_yaml_multiline(logfile, "output", output.c_str());
dump_vector_int_yaml(logfile, "output_tokens", output_tokens);
llama_dump_timing_info_yaml(logfile, ctx);
fclose(logfile);
}
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
void sigint_handler(int signo) {
if (signo == SIGINT) {
@@ -48,6 +97,7 @@ void sigint_handler(int signo) {
console::cleanup();
printf("\n");
llama_print_timings(*g_ctx);
write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
_exit(130);
}
}
@@ -56,6 +106,7 @@ void sigint_handler(int signo) {
int main(int argc, char ** argv) {
gpt_params params;
g_params = &params;
if (gpt_params_parse(argc, argv, params) == false) {
return 1;
@@ -116,6 +167,7 @@ int main(int argc, char ** argv) {
llama_model * model;
llama_context * ctx;
llama_context * ctx_guidance = NULL;
g_model = &model;
g_ctx = &ctx;
// load the model and apply lora adapter, if any
@@ -397,6 +449,10 @@ int main(int argc, char ** argv) {
int n_session_consumed = 0;
int n_past_guidance = 0;
std::vector<int> input_tokens; g_input_tokens = &input_tokens;
std::vector<int> output_tokens; g_output_tokens = &output_tokens;
std::ostringstream output_ss; g_output_ss = &output_ss;
// the first thing we will do is to output the prompt, so set color accordingly
console::set_display(console::prompt);
@@ -667,7 +723,15 @@ int main(int argc, char ** argv) {
// display text
if (input_echo) {
for (auto id : embd) {
-printf("%s", llama_token_to_piece(ctx, id).c_str());
+const std::string token_str = llama_token_to_piece(ctx, id);
printf("%s", token_str.c_str());
if (embd.size() > 1) {
input_tokens.push_back(id);
} else {
output_tokens.push_back(id);
output_ss << token_str;
}
}
fflush(stdout);
}
@@ -761,6 +825,8 @@ int main(int argc, char ** argv) {
printf("%s", params.input_suffix.c_str());
}
const size_t original_size = embd_inp.size();
// instruct mode: insert instruction prefix
if (params.instruct && !is_antiprompt) {
n_consumed = embd_inp.size();
@@ -775,6 +841,12 @@ int main(int argc, char ** argv) {
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
}
for (size_t i = original_size; i < embd_inp.size(); ++i) {
const llama_token token = embd_inp[i];
output_tokens.push_back(token);
output_ss << llama_token_to_piece(ctx, token);
}
n_remain -= line_inp.size();
}
@@ -817,6 +889,8 @@ int main(int argc, char ** argv) {
}
llama_print_timings(ctx);
write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
if (ctx_guidance) { llama_free(ctx_guidance); }
llama_free(ctx);
llama_free_model(model);

View file

@@ -3,16 +3,79 @@
#include "build-info.h"
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <sstream>
#include <cstring>
#include <thread>
#include <mutex>
#include <vector>
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
struct results_perplexity {
std::vector<llama_token> tokens;
double ppl_value;
std::vector<float> logits;
std::vector<float> probs;
};
struct results_log_softmax {
double log_softmax;
float logit;
float prob;
};
void write_logfile(const llama_context * ctx, const gpt_params & params,
const llama_model * model, const struct results_perplexity & results) {
if (params.logdir.empty()) {
return;
}
if (params.hellaswag) {
fprintf(stderr, "%s: warning: logging results is not implemented for HellaSwag. No files will be written.\n", __func__);
return;
}
const std::string timestamp = get_sortable_timestamp();
const bool success = create_directory_with_parents(params.logdir);
if (!success) {
fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
__func__, params.logdir.c_str());
return;
}
const std::string logfile_path = params.logdir + timestamp + ".yml";
FILE * logfile = fopen(logfile_path.c_str(), "w");
if (logfile == NULL) {
fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
return;
}
fprintf(logfile, "binary: main\n");
char model_desc[128];
llama_model_desc(model, model_desc, sizeof(model_desc));
dump_non_result_info_yaml(logfile, params, ctx, timestamp, results.tokens, model_desc);
fprintf(logfile, "\n");
fprintf(logfile, "######################\n");
fprintf(logfile, "# Perplexity Results #\n");
fprintf(logfile, "######################\n");
fprintf(logfile, "\n");
dump_vector_float_yaml(logfile, "logits", results.logits);
fprintf(logfile, "ppl_value: %f\n", results.ppl_value);
dump_vector_float_yaml(logfile, "probs", results.probs);
llama_dump_timing_info_yaml(logfile, ctx);
fclose(logfile);
}
std::vector<float> softmax(const std::vector<float>& logits) {
std::vector<float> probs(logits.size());
float max_logit = logits[0];
@@ -29,20 +92,20 @@ std::vector<float> softmax(const std::vector<float>& logits) {
return probs;
}
-float log_softmax(int n_vocab, const float * logits, int tok) {
+results_log_softmax log_softmax(int n_vocab, const float * logits, int tok) {
float max_logit = logits[0];
for (int i = 1; i < n_vocab; ++i) max_logit = std::max(max_logit, logits[i]);
double sum_exp = 0.0;
for (int i = 0; i < n_vocab; ++i) sum_exp += expf(logits[i] - max_logit);
-return logits[tok] - max_logit - log(sum_exp);
+return {logits[tok] - max_logit - log(sum_exp), logits[tok], expf(logits[tok] - max_logit) / (float) sum_exp};
}
-void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token, std::vector<std::thread>& workers,
+void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token, std::vector<std::thread> & workers,
-double& nll, double& nll2) {
+double & nll, double & nll2, float * logit_history, float * prob_history) {
std::mutex mutex;
int counter = 0;
-auto compute = [&mutex, &counter, &nll, &nll2, n_vocab, logits, tokens, n_token] () {
+auto compute = [&mutex, &counter, &nll, &nll2, logit_history, prob_history, n_vocab, logits, tokens, n_token] () {
double local_nll = 0, local_nll2 = 0;
while (true) {
std::unique_lock<std::mutex> lock(mutex);
@@ -52,34 +115,43 @@ void process_logits(int n_vocab, const float * logits, const int * tokens, int n
break;
}
lock.unlock();
-double v = -log_softmax(n_vocab, logits + i*n_vocab, tokens[i+1]);
+const results_log_softmax results = log_softmax(n_vocab, logits + i*n_vocab, tokens[i+1]);
const double v = -results.log_softmax;
local_nll += v;
local_nll2 += v*v;
logit_history[i] = results.logit;
prob_history[i] = results.prob;
}
};
-for (auto& w : workers) w = std::thread(compute);
+for (auto & w : workers) w = std::thread(compute);
compute();
-for (auto& w : workers) w.join();
+for (auto & w : workers) w.join();
}
-void perplexity_v2(llama_context * ctx, const gpt_params & params) {
+results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params) {
// Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
// Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
// Output: `perplexity: 13.5106 [114/114]`
// BOS tokens will be added for each chunk before eval
-if (params.ppl_stride <= 0) {
-fprintf(stderr, "%s: stride is %d but must be greater than zero!\n",__func__,params.ppl_stride);
-return;
-}
const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
const bool add_bos = is_spm;
fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
-auto tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
std::vector<float> logit_history;
std::vector<float> prob_history;
logit_history.resize(tokens.size());
prob_history.resize(tokens.size());
if (params.ppl_stride <= 0) {
fprintf(stderr, "%s: stride is %d but must be greater than zero!\n",__func__,params.ppl_stride);
return {tokens, -1, logit_history, prob_history};
}
const int calc_chunk = params.n_ctx;
@@ -88,7 +160,7 @@ void perplexity_v2(llama_context * ctx, const gpt_params & params) {
if (int(tokens.size()) <= calc_chunk) {
fprintf(stderr, "%s: there are only %zu tokens, this is not enough for a context size of %d and stride %d\n",__func__,
tokens.size(), params.n_ctx, params.ppl_stride);
-return;
+return {tokens, -1, logit_history, prob_history};
}
const int n_chunk_max = (tokens.size() - calc_chunk + params.ppl_stride - 1) / params.ppl_stride;
@ -120,7 +192,7 @@ void perplexity_v2(llama_context * ctx, const gpt_params & params) {
//fprintf(stderr, " Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch); //fprintf(stderr, " Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) { if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) {
//fprintf(stderr, "%s : failed to eval\n", __func__); //fprintf(stderr, "%s : failed to eval\n", __func__);
return; return {tokens, -1, logit_history, prob_history};
} }
// save original token and restore it after eval // save original token and restore it after eval
@ -161,6 +233,8 @@ void perplexity_v2(llama_context * ctx, const gpt_params & params) {
logits.begin() + (j + 1) * n_vocab); logits.begin() + (j + 1) * n_vocab);
const float prob = softmax(tok_logits)[tokens[start + j + 1]]; const float prob = softmax(tok_logits)[tokens[start + j + 1]];
logit_history[start + j + 1] = tok_logits[tokens[start + j + 1]];
prob_history[start + j + 1] = prob;
nll += -std::log(prob); nll += -std::log(prob);
++count; ++count;
@ -174,12 +248,14 @@ void perplexity_v2(llama_context * ctx, const gpt_params & params) {
fflush(stdout); fflush(stdout);
} }
printf("\n"); printf("\n");
return {tokens, std::exp(nll / count), logit_history, prob_history};
} }
void perplexity(llama_context * ctx, const gpt_params & params) { results_perplexity perplexity(llama_context * ctx, const gpt_params & params) {
if (params.ppl_stride > 0) { if (params.ppl_stride > 0) {
perplexity_v2(ctx, params); return perplexity_v2(ctx, params);
return;
} }
// Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
@ -193,11 +269,17 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
auto tim1 = std::chrono::high_resolution_clock::now(); auto tim1 = std::chrono::high_resolution_clock::now();
fprintf(stderr, "%s: tokenizing the input ..\n", __func__); fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
auto tokens = ::llama_tokenize(ctx, params.prompt, add_bos); std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
auto tim2 = std::chrono::high_resolution_clock::now(); auto tim2 = std::chrono::high_resolution_clock::now();
fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count()); fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
std::vector<float> logit_history;
logit_history.resize(tokens.size());
std::vector<float> prob_history;
prob_history.resize(tokens.size());
const int n_chunk_max = tokens.size() / params.n_ctx; const int n_chunk_max = tokens.size() / params.n_ctx;
const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max); const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
@ -236,7 +318,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) { if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) {
fprintf(stderr, "%s : failed to eval\n", __func__); fprintf(stderr, "%s : failed to eval\n", __func__);
return; return {tokens, -1, logit_history, prob_history};
} }
// restore the original token in case it was set to BOS // restore the original token in case it was set to BOS
@ -272,7 +354,8 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
// last 256 tokens. Then, we split the input up into context window size chunks to // last 256 tokens. Then, we split the input up into context window size chunks to
// process the entire prompt. // process the entire prompt.
const int first = std::min(512, params.n_ctx/2); const int first = std::min(512, params.n_ctx/2);
process_logits(n_vocab, logits.data() + first*n_vocab, tokens.data() + start + first, params.n_ctx - 1 - first, workers, nll, nll2); process_logits(n_vocab, logits.data() + first*n_vocab, tokens.data() + start + first, params.n_ctx - 1 - first,
workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
count += params.n_ctx - first - 1; count += params.n_ctx - first - 1;
// perplexity is e^(average negative log-likelihood) // perplexity is e^(average negative log-likelihood)
@ -287,16 +370,19 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
fflush(stdout); fflush(stdout);
} }
printf("\n"); printf("\n");
nll2 /= count; nll2 /= count;
nll /= count; nll /= count;
const double ppl = exp(nll);
nll2 -= nll * nll; nll2 -= nll * nll;
if (nll2 > 0) { if (nll2 > 0) {
nll2 = sqrt(nll2/(count-1)); nll2 = sqrt(nll2/(count-1));
double ppl = exp(nll);
printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl); printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl);
} else { } else {
printf("Unexpected negative standard deviation of log(prob)\n"); printf("Unexpected negative standard deviation of log(prob)\n");
} }
return {tokens, ppl, logit_history, prob_history};
} }
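As a sketch of what the final printout reports (derived from the accumulation above, not stated in the patch): with per-token negative log-likelihoods v_i over N counted tokens,

    \mathrm{PPL} = \exp\!\Big(\frac{1}{N}\sum_i v_i\Big), \qquad \sigma = \sqrt{\frac{\overline{v^2} - \bar{v}^{\,2}}{N-1}},

and the reported uncertainty is \sigma \cdot \mathrm{PPL}, i.e. the standard error of the mean log-likelihood propagated through the exponential.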
std::vector<float> hellaswag_evaluate_tokens(llama_context * ctx, const std::vector<int>& tokens, int n_past, int n_batch, std::vector<float> hellaswag_evaluate_tokens(llama_context * ctx, const std::vector<int>& tokens, int n_past, int n_batch,
@ -604,13 +690,16 @@ int main(int argc, char ** argv) {
params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info()); params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
} }
struct results_perplexity results;
if (params.hellaswag) { if (params.hellaswag) {
hellaswag_score(ctx, params); hellaswag_score(ctx, params);
} else { } else {
perplexity(ctx, params); results = perplexity(ctx, params);
} }
llama_print_timings(ctx); llama_print_timings(ctx);
write_logfile(ctx, params, model, results);
llama_free(ctx); llama_free(ctx);
llama_free_model(model); llama_free_model(model);

View file

@ -100,7 +100,7 @@ int main(int argc, char ** argv) {
} }
} }
if (argc - arg_idx < 3) { if (argc - arg_idx < 2) {
usage(argv[0]); usage(argv[0]);
} }
@ -114,7 +114,7 @@ int main(int argc, char ** argv) {
std::string ftype_str; std::string ftype_str;
if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) { if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
std::string fpath; std::string fpath;
const size_t pos = fname_inp.find_last_of('/'); const size_t pos = fname_inp.find_last_of("/\\");
if (pos != std::string::npos) { if (pos != std::string::npos) {
fpath = fname_inp.substr(0, pos + 1); fpath = fname_inp.substr(0, pos + 1);
} }

View file

@ -719,7 +719,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
fprintf(stdout, " -ts SPLIT --tensor-split SPLIT\n"); fprintf(stdout, " -ts SPLIT --tensor-split SPLIT\n");
fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n"); fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n"); fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n");
fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n"); fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n");
fprintf(stdout, " -nommq, --no-mul-mat-q\n"); fprintf(stdout, " -nommq, --no-mul-mat-q\n");
fprintf(stdout, " use cuBLAS instead of custom mul_mat_q CUDA kernels.\n"); fprintf(stdout, " use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
fprintf(stdout, " Not recommended since this is both slower and uses more VRAM.\n"); fprintf(stdout, " Not recommended since this is both slower and uses more VRAM.\n");

View file

@ -8,15 +8,15 @@ wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/s
# train # train
./bin/train-text-from-scratch \ ./bin/train-text-from-scratch \
--vocab-model ../models/ggml-vocab.bin \ --vocab-model ../models/ggml-vocab-llama.gguf \
--ctx 64 --embd 256 --head 8 --layer 16 \ --ctx 64 --embd 256 --head 8 --layer 16 \
--checkpoint-in chk-shakespeare-256x16.bin \ --checkpoint-in chk-shakespeare-256x16.gguf \
--checkpoint-out chk-shakespeare-256x16.bin \ --checkpoint-out chk-shakespeare-256x16.gguf \
--model-out ggml-shakespeare-256x16-f32.bin \ --model-out ggml-shakespeare-256x16-f32.gguf \
--train-data "shakespeare.txt" \ --train-data "shakespeare.txt" \
-t 6 -b 16 -n 32 --seed 1 --adam-iter 16 \ -t 6 -b 16 --seed 1 --adam-iter 256 \
--print-details-interval 0 --predict 16 --use-flash --no-checkpointing
# predict # predict
./bin/main -m ggml-shakespeare-256x16-f32.bin ./bin/main -m ggml-shakespeare-256x16-f32.gguf
``` ```

View file

@ -0,0 +1,492 @@
#!/usr/bin/env python3
# train-text-from-scratch checkpoint --> gguf conversion
import argparse
import gguf
import os
import struct
import sys
import numpy as np
from pathlib import Path
# gguf constants
LLM_KV_OPTIMIZER_TYPE = "optimizer.type"
LLM_KV_OPTIMIZER_TYPE_ADAM = "adam"
LLM_KV_OPTIMIZER_TYPE_LBFGS = "lbfgs"
LLM_KV_OPTIMIZER_FILE_VERSION = "optimizer.file_version"
LLM_KV_OPTIMIZER_CONVERGENCE_PAST_COUNT = "optimizer.convergence_past_count"
LLM_KV_OPTIMIZER_PARAMETER_COUNT = "optimizer.parameter_count"
LLM_KV_OPTIMIZER_ITERATION_COUNT = "optimizer.iteration_count"
LLM_KV_OPTIMIZER_JUST_INITIALIZED = "optimizer.just_initialized"
LLM_KV_OPTIMIZER_ADAM_BEST_LOSS = "optimizer.adam.best_loss"
LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS = "optimizer.adam.previous_loss"
LLM_KV_OPTIMIZER_ADAM_NO_IMPROVEMENT_COUNT = "optimizer.adam.no_improvement_count"
LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT = "optimizer.lbfgs.approx_hessian_count"
LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS = "optimizer.lbfgs.best_loss"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP = "optimizer.lbfgs.line_search_step"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J = "optimizer.lbfgs.line_search_j"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K = "optimizer.lbfgs.line_search_k"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END = "optimizer.lbfgs.line_search_end"
LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT = "optimizer.lbfgs.no_improvement_count"
LLM_TENSOR_OPTIMIZER_ADAM_FIRST_MOMENTS = "optimizer.adam.first_moments"
LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS = "optimizer.adam.second_moments"
LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES = "optimizer.adam.past_loss_values"
LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS = "optimizer.lbfgs.current_parameters"
LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS = "optimizer.lbfgs.previous_parameters"
LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS = "optimizer.lbfgs.current_gradients"
LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS = "optimizer.lbfgs.previous_gradients"
LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION = "optimizer.lbfgs.search_direction"
LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES = "optimizer.lbfgs.past_loss_values"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA = "optimizer.lbfgs.memory_alpha"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS = "optimizer.lbfgs.memory_ys"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S = "optimizer.lbfgs.memory_s"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y = "optimizer.lbfgs.memory_y"
LLM_KV_TRAINING_FILE_VERSION = "training.file_version"
LLM_KV_TRAINING_ITERATION_COUNT = "training.iteration_count"
LLM_KV_TRAINING_SAMPLE_COUNT = "training.sample_count"
LLM_KV_TRAINING_TOKEN_COUNT = "training.token_count"
class Tensor:
def __init__(self, dtype='f', ne=None):
if ne is None:
ne = []
self.dtype = dtype
self.ne = ne
self.nbytes = 0
if self.dtype == 'f':
if len(self.ne) == 0:
self.nbytes = 0
else:
self.nbytes = int(np.product(self.ne)) * 4
else:
raise ValueError(f"Unhandled data type '{self.dtype}'")
def load(self, data, offset):
nd = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
namelen = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
dtype = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
assert(nd == len(self.ne))
ne = []
for d in range(nd):
n = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
ne.append(n)
assert(tuple(ne) == tuple(self.ne))
if self.dtype == 'f':
assert(dtype == 0)
else:
raise ValueError(f"Unhandled data type '{self.dtype}'")
self.name = bytes(data[offset:offset+namelen]); offset += namelen
# 32-byte alignment
offset += (0 - offset) & 31
self.data = data[offset:offset+self.nbytes]
offset += self.nbytes
return offset
def max_storage_size(self):
result = 0
result += 4 # nd
result += 4 # namelen
result += 4 # dtype
result += len(self.ne)*8 # ne
result += 48 # name (maximum as of commit 3b5515bbe0e2224425986ba24f1f5d84aa38dce9)
result += 31 # 32-byte alignment
result += self.nbytes
return result
def save_gguf(self, gguf_writer, name):
gguf_writer.add_tensor(
name=name,
tensor=self.data,
raw_shape=np.array(list(reversed(self.ne))),
raw_dtype=gguf.GGMLQuantizationType.F32)
class OptimizationParamsV0:
def __init__(self):
pass
def load(self, data, offset):
self.type = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.n_threads = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.past = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.delta = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.print_forward_graph = struct.unpack('<?', bytes(data[offset:offset + 1]))[0]; offset += 4 # 32bit-aligned
self.print_backward_graph = struct.unpack('<?', bytes(data[offset:offset + 1]))[0]; offset += 4 # 32bit-aligned
self.adam_n_iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_sched = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_decay = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_alpha = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_beta1 = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_beta2 = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_eps = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_eps_f = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_eps_g = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_m = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_n_iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_max_linesearch = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_eps = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_ftol = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_wolfe = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_min_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_max_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_linesearch = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
return offset
class OptimizationContext:
def __init__(self):
pass
def load(self, data, offset):
self.version = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]
offset += 4
if self.version == 0:
params = OptimizationParamsV0()
offset = params.load(data, offset)
self.past = params.past
self.lbfgs_m = params.lbfgs_m
self.nx = struct.unpack('N', bytes(data[offset:offset + 8]))[0]; offset += 8
self.iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.just_initialized = bool(struct.unpack('<i', bytes(data[offset:offset + 4]))[0]); offset += 4
self.type = params.type
self.adam_m = Tensor('f', [self.nx])
self.adam_v = Tensor('f', [self.nx])
self.adam_pf = Tensor('f', [self.past] if self.past > 0 else [])
self.lbfgs_x = Tensor('f', [self.nx])
self.lbfgs_xp = Tensor('f', [self.nx])
self.lbfgs_g = Tensor('f', [self.nx])
self.lbfgs_gp = Tensor('f', [self.nx])
self.lbfgs_d = Tensor('f', [self.nx])
self.lbfgs_pf = Tensor('f', [self.past] if self.past > 0 else [])
self.lbfgs_lmal = Tensor('f', [self.lbfgs_m])
self.lbfgs_lmys = Tensor('f', [self.lbfgs_m])
self.lbfgs_lms = Tensor('f', [self.nx, self.lbfgs_m])
self.lbfgs_lmy = Tensor('f', [self.nx, self.lbfgs_m])
if self.type == 0:
# these tensors are stored, but we don't need their data
x = Tensor('f', [self.nx])
g = Tensor('f', [self.nx])
g2 = Tensor('f', [self.nx])
mh = Tensor('f', [self.nx])
vh = Tensor('f', [self.nx])
offset = x.load(data, offset)
offset = g.load(data, offset)
offset = g2.load(data, offset)
offset = self.adam_m.load(data, offset)
offset = self.adam_v.load(data, offset)
offset = mh.load(data, offset)
offset = vh.load(data, offset)
offset = self.adam_pf.load(data, offset)
self.adam_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_fx_prev = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
elif self.type == 1:
offset = self.lbfgs_x.load(data, offset)
offset = self.lbfgs_xp.load(data, offset)
offset = self.lbfgs_g.load(data, offset)
offset = self.lbfgs_gp.load(data, offset)
offset = self.lbfgs_d.load(data, offset)
offset = self.lbfgs_pf.load(data, offset)
offset = self.lbfgs_lmal.load(data, offset)
offset = self.lbfgs_lmys.load(data, offset)
offset = self.lbfgs_lms.load(data, offset)
offset = self.lbfgs_lmy.load(data, offset)
self.lbfgs_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_j = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_k = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_end = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
else:
raise ValueError('Unknown optimizer type')
elif self.version == 1:
self.past = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_m = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.nx = struct.unpack('N', bytes(data[offset:offset + 8]))[0]; offset += 8
self.iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.just_initialized = bool(struct.unpack('<i', bytes(data[offset:offset + 4]))[0]); offset += 4
self.adam_m = Tensor('f', [self.nx])
self.adam_v = Tensor('f', [self.nx])
self.adam_pf = Tensor('f', [self.past] if self.past > 0 else [])
self.lbfgs_x = Tensor('f', [self.nx])
self.lbfgs_xp = Tensor('f', [self.nx])
self.lbfgs_g = Tensor('f', [self.nx])
self.lbfgs_gp = Tensor('f', [self.nx])
self.lbfgs_d = Tensor('f', [self.nx])
self.lbfgs_pf = Tensor('f', [self.past] if self.past > 0 else [])
self.lbfgs_lmal = Tensor('f', [self.lbfgs_m])
self.lbfgs_lmys = Tensor('f', [self.lbfgs_m])
self.lbfgs_lms = Tensor('f', [self.nx, self.lbfgs_m])
self.lbfgs_lmy = Tensor('f', [self.nx, self.lbfgs_m])
# forgot to save type in version 1:
# guess self.type from number of remaining bytes
size_type_0 = 12 + sum([t.max_storage_size() for t in
[self.adam_m, self.adam_v]
+([self.adam_pf] if (self.past > 0) else [])])
size_type_1 = 24 + sum([t.max_storage_size() for t in
[self.lbfgs_x, self.lbfgs_xp, self.lbfgs_g,
self.lbfgs_gp, self.lbfgs_d, self.lbfgs_pf,
self.lbfgs_lmal, self.lbfgs_lmys,
self.lbfgs_lms, self.lbfgs_lmy]
+([self.lbfgs_pf] if (self.past > 0) else [])])
# due to alignment padding the size might not be exact
# but the difference in size for both types is significant,
# so we can just use whichever is closest
remaining = len(data) - offset
if abs(remaining - size_type_0) < abs(remaining - size_type_1):
self.type = 0
else:
self.type = 1
if self.type == 0:
offset = self.adam_m.load(data, offset)
offset = self.adam_v.load(data, offset)
offset = self.adam_pf.load(data,offset)
self.adam_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_fx_prev = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.adam_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
elif self.type == 1:
offset = self.lbfgs_x.load(data, offset)
offset = self.lbfgs_xp.load(data, offset)
offset = self.lbfgs_g.load(data, offset)
offset = self.lbfgs_gp.load(data, offset)
offset = self.lbfgs_d.load(data, offset)
offset = self.lbfgs_pf.load(data, offset)
offset = self.lbfgs_lmal.load(data, offset)
offset = self.lbfgs_lmys.load(data, offset)
offset = self.lbfgs_lms.load(data, offset)
offset = self.lbfgs_lmy.load(data, offset)
self.lbfgs_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_j = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_k = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_end = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
self.lbfgs_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
else:
raise ValueError('Invalid version of checkpoint file')
return offset
def save_gguf(self, gguf_writer):
gguf_writer.add_uint32(LLM_KV_OPTIMIZER_FILE_VERSION, 0)
gguf_writer.add_uint32(LLM_KV_OPTIMIZER_CONVERGENCE_PAST_COUNT, self.past)
gguf_writer.add_uint64(LLM_KV_OPTIMIZER_PARAMETER_COUNT, self.nx)
gguf_writer.add_uint32(LLM_KV_OPTIMIZER_ITERATION_COUNT, self.iter)
gguf_writer.add_bool(LLM_KV_OPTIMIZER_JUST_INITIALIZED, self.just_initialized)
if self.type == 0:
gguf_writer.add_string(LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_ADAM)
gguf_writer.add_float32(LLM_KV_OPTIMIZER_ADAM_BEST_LOSS, self.adam_fx_best)
gguf_writer.add_float32(LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS, self.adam_fx_prev)
gguf_writer.add_uint32(LLM_KV_OPTIMIZER_ADAM_NO_IMPROVEMENT_COUNT, self.adam_n_no_improvement)
self.adam_m.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_FIRST_MOMENTS)
self.adam_v.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS)
if self.past > 0:
self.adam_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES)
elif self.type == 1:
gguf_writer.add_string(LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_LBFGS)
gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT, self.lbfgs_m)
gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS, self.lbfgs_fx_best)
gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP, self.lbfgs_step)
gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J, self.lbfgs_j)
gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K, self.lbfgs_k)
gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END, self.lbfgs_end)
gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT, self.lbfgs_n_no_improvement)
self.lbfgs_x.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS)
self.lbfgs_xp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS)
self.lbfgs_g.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS)
self.lbfgs_gp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS)
self.lbfgs_d.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION)
if self.past > 0:
self.lbfgs_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES)
self.lbfgs_lmal.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA)
self.lbfgs_lmys.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS)
self.lbfgs_lms.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S)
self.lbfgs_lmy.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y)
else:
raise ValueError('Unknown optimizer type')
class ModelParams:
def __init__(self):
pass
def load(self, data, offset):
self.n_vocab = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.n_embd = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.n_mult = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.n_head = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.n_layer = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.n_rot = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
return offset
def get_n_ff(self):
# struct my_llama_model::get_n_ff in train-text-from-scratch.cpp commit 3b5515bbe0e2224425986ba24f1f5d84aa38dce9
return ((2*(4*self.n_embd)//3 + self.n_mult - 1)//self.n_mult)*self.n_mult
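# i.e. floor(8*n_embd/3), rounded up to the next multiple of n_mult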
def save_gguf(self, gguf_writer):
# self.n_vocab not saved
gguf_writer.add_embedding_length(self.n_embd)
gguf_writer.add_head_count(self.n_head)
gguf_writer.add_block_count(self.n_layer)
gguf_writer.add_rope_dimension_count(self.n_rot)
gguf_writer.add_feed_forward_length(self.get_n_ff())
def tensor_name(key, bid=None):
return gguf.MODEL_TENSOR_NAMES[gguf.MODEL_ARCH.LLAMA][key].format(bid=bid) + ".weight"
class Layer:
def __init__(self, params, bid):
self.bid = bid
self.att_norm = Tensor('f', [params.n_embd])
self.wq = Tensor('f', [params.n_embd, params.n_embd])
self.wk = Tensor('f', [params.n_embd, params.n_embd])
self.wv = Tensor('f', [params.n_embd, params.n_embd])
self.wo = Tensor('f', [params.n_embd, params.n_embd])
self.ffn_norm = Tensor('f', [params.n_embd])
self.w1 = Tensor('f', [params.n_embd, params.get_n_ff()])
self.w2 = Tensor('f', [params.get_n_ff(), params.n_embd])
self.w3 = Tensor('f', [params.n_embd, params.get_n_ff()])
def load(self, data, offset):
offset = self.att_norm.load(data, offset)
offset = self.wq.load(data, offset)
offset = self.wk.load(data, offset)
offset = self.wv.load(data, offset)
offset = self.wo.load(data, offset)
offset = self.ffn_norm.load(data, offset)
offset = self.w1.load(data, offset)
offset = self.w2.load(data, offset)
offset = self.w3.load(data, offset)
return offset
def save_gguf(self, gguf_writer):
self.att_norm.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_NORM, self.bid))
self.wq.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_Q, self.bid))
self.wk.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_K, self.bid))
self.wv.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_V, self.bid))
self.wo.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, self.bid))
self.ffn_norm.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_NORM, self.bid))
self.w1.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_GATE, self.bid))
self.w2.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, self.bid))
self.w3.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_UP, self.bid))
class Model:
def __init__(self):
self.params = ModelParams()
self.layers = []
def load(self, data, offset):
offset = self.params.load(data, offset)
self.tok_embd = Tensor('f', [self.params.n_embd, self.params.n_vocab])
self.norm = Tensor('f', [self.params.n_embd])
self.output = Tensor('f', [self.params.n_embd, self.params.n_vocab])
offset = self.tok_embd.load(data, offset)
offset = self.norm.load(data, offset)
offset = self.output.load(data, offset)
self.layers.clear()
for bid in range(self.params.n_layer):
layer = Layer(self.params, bid)
offset = layer.load(data, offset)
self.layers.append(layer)
return offset
def save_gguf(self, gguf_writer):
self.params.save_gguf(gguf_writer)
self.tok_embd.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD))
self.norm.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT_NORM))
self.output.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT))
for layer in self.layers:
layer.save_gguf(gguf_writer)
class Checkpoint:
def __init__(self):
self.model = Model()
self.opt_ctx = OptimizationContext()
def load(self, data, offset):
magic = bytes(reversed(data[offset:offset + 4])); offset += 4
if magic != b'ggcp':
raise ValueError(f"File header magic indicates, that this is no checkpoint file. Expected 'ggcp', Got '{str(magic)}'")
self.version = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
if self.version != 0:
raise ValueError('Invalid version of checkpoint file')
self.train_its = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.train_samples = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
self.train_tokens = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
offset = self.model.load(data, offset)
offset = self.opt_ctx.load(data, offset)
return offset
def save_gguf(self, gguf_writer):
gguf_writer.add_file_type(gguf.GGMLQuantizationType.F32)
gguf_writer.add_layer_norm_rms_eps(1e-5)
gguf_writer.add_uint32(LLM_KV_TRAINING_FILE_VERSION, 0)
gguf_writer.add_uint32(LLM_KV_TRAINING_ITERATION_COUNT, self.train_its)
gguf_writer.add_uint32(LLM_KV_TRAINING_SAMPLE_COUNT, self.train_samples)
gguf_writer.add_uint32(LLM_KV_TRAINING_TOKEN_COUNT, self.train_tokens)
self.model.save_gguf(gguf_writer)
self.opt_ctx.save_gguf(gguf_writer)
def handle_args():
parser = argparse.ArgumentParser(description = 'Convert train-text-from-scratch checkpoints to GGUF')
parser.add_argument('--input', '-i', type = Path, help = 'Input train checkpoint filename', required=True)
parser.add_argument('--output', '-o', type = Path, help ='Output GGUF filename', required=True)
return parser.parse_args()
def main():
cfg = handle_args()
data = np.memmap(cfg.input, mode = 'r')
chk = Checkpoint()
offset = 0
offset = chk.load(data, offset)
# we should have read all available data
assert(offset == len(data))
gguf_writer = gguf.GGUFWriter(cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False)
chk.save_gguf(gguf_writer)
print(" gguf: write header")
gguf_writer.write_header_to_file()
print(" gguf: write metadata")
gguf_writer.write_kv_data_to_file()
print(" gguf: write tensors")
gguf_writer.write_tensors_to_file()
gguf_writer.close()
if __name__ == '__main__':
main()
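A hypothetical invocation of the converter above (the script's file name is not shown in this diff and is assumed here; the --input/--output flags come from handle_args):

```
python3 convert-train-checkpoint-to-gguf.py \
    --input  chk-shakespeare-256x16.bin \
    --output chk-shakespeare-256x16.gguf
```

The input is an old binary train-text-from-scratch checkpoint; the output is the equivalent GGUF checkpoint, matching the chk-*.gguf naming used in the README section above.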

File diff suppressed because it is too large.

View file

@ -107,6 +107,10 @@ static size_t ggml_allocator_get_alloc_size(struct ggml_allocr * alloc, struct g
} }
void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
#ifdef GGML_ALLOCATOR_DEBUG
GGML_ASSERT(ggml_is_view(tensor) == false); // views generally get data pointer from one of their sources
GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
#endif
size_t size = ggml_allocator_get_alloc_size(alloc, tensor); size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
size = aligned_offset(NULL, size, alloc->alignment); size = aligned_offset(NULL, size, alloc->alignment);
@ -268,7 +272,7 @@ struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment)
/*.parse_seq = */ {0}, /*.parse_seq = */ {0},
/*.parse_seq_len = */ 0, /*.parse_seq_len = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG #ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ = {0}, /*.allocated_tensors = */ {0},
#endif #endif
}; };
@ -297,7 +301,7 @@ struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
/*.parse_seq = */ {0}, /*.parse_seq = */ {0},
/*.parse_seq_len = */ 0, /*.parse_seq_len = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG #ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ = {0}, /*.allocated_tensors = */ {0},
#endif #endif
}; };
@ -556,7 +560,7 @@ static size_t ggml_allocator_alloc_graph_tensors_n(
struct ggml_tensor * view_src = get_view_source(parent); struct ggml_tensor * view_src = get_view_source(parent);
struct hash_node * view_src_hn = hash_get(ht, view_src); struct hash_node * view_src_hn = hash_get(ht, view_src);
view_src_hn->n_views -= 1; view_src_hn->n_views -= 1;
AT_PRINTF("view_src %s\n", view_src->name); AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) { if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
ggml_allocator_free_tensor(alloc, view_src); ggml_allocator_free_tensor(alloc, view_src);
} }

View file

@ -4908,8 +4908,8 @@ static void scale_f32_cuda(const float * x, float * dst, const float scale, cons
static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0, static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) { const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) {
GGML_ASSERT(nrows % 2 == 0); // GG: is this assert really needed? I don't see why GGML_ASSERT(ncols % 2 == 0);
const dim3 block_dims(1, 2*CUDA_ROPE_BLOCK_SIZE, 1); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
const dim3 block_nums(nrows, num_blocks_x, 1); const dim3 block_nums(nrows, num_blocks_x, 1);
rope_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale); rope_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
@ -4917,7 +4917,8 @@ static void rope_f32_cuda(const float * x, float * dst, const int ncols, const i
static void rope_neox_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0, static void rope_neox_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) { const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) {
const dim3 block_dims(1, 2*CUDA_ROPE_BLOCK_SIZE, 1); GGML_ASSERT(ncols % 2 == 0);
const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
const dim3 block_nums(nrows, num_blocks_x, 1); const dim3 block_nums(nrows, num_blocks_x, 1);
rope_neox_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale); rope_neox_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);

View file

@ -24,6 +24,7 @@
// max memory buffers that can be mapped to the device // max memory buffers that can be mapped to the device
#define GGML_METAL_MAX_BUFFERS 16 #define GGML_METAL_MAX_BUFFERS 16
#define GGML_METAL_MAX_COMMAND_BUFFERS 32
struct ggml_tensor; struct ggml_tensor;
struct ggml_cgraph; struct ggml_cgraph;

View file

@ -11,6 +11,7 @@
#define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? (a) : (b))
// TODO: temporary - reuse llama.cpp logging
#ifdef GGML_METAL_NDEBUG #ifdef GGML_METAL_NDEBUG
#define metal_printf(...) #define metal_printf(...)
#else #else
@ -33,12 +34,15 @@ struct ggml_metal_buffer {
struct ggml_metal_context { struct ggml_metal_context {
int n_cb; int n_cb;
float * logits;
id<MTLDevice> device; id<MTLDevice> device;
id<MTLCommandQueue> queue; id<MTLCommandQueue> queue;
id<MTLLibrary> library; id<MTLLibrary> library;
id<MTLCommandBuffer> command_buffers [GGML_METAL_MAX_COMMAND_BUFFERS];
id<MTLComputeCommandEncoder> command_encoders[GGML_METAL_MAX_COMMAND_BUFFERS];
dispatch_queue_t d_queue;
int n_buffers; int n_buffers;
struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS]; struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
@ -110,16 +114,17 @@ static NSString * const msl_library_source = @"see metal.metal";
@end @end
struct ggml_metal_context * ggml_metal_init(int n_cb) { struct ggml_metal_context * ggml_metal_init(int n_cb) {
fprintf(stderr, "%s: allocating\n", __func__); metal_printf("%s: allocating\n", __func__);
struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context)); struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
ctx->n_cb = n_cb; ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
ctx->device = MTLCreateSystemDefaultDevice(); ctx->device = MTLCreateSystemDefaultDevice();
ctx->queue = [ctx->device newCommandQueue]; ctx->queue = [ctx->device newCommandQueue];
ctx->n_buffers = 0; ctx->n_buffers = 0;
ctx->concur_list_len = 0; ctx->concur_list_len = 0;
ctx->d_queue = dispatch_queue_create("llama.cpp", DISPATCH_QUEUE_CONCURRENT);
#if 0 #if 0
// compile from source string and show compile log // compile from source string and show compile log
@ -128,7 +133,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error]; ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
if (error) { if (error) {
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]); metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
return NULL; return NULL;
} }
} }
@ -142,11 +147,11 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
//NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"]; //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"];
NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]]; NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
fprintf(stderr, "%s: loading '%s'\n", __func__, [path UTF8String]); metal_printf("%s: loading '%s'\n", __func__, [path UTF8String]);
NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error]; NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
if (error) { if (error) {
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]); metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
return NULL; return NULL;
} }
@ -158,7 +163,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error]; ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error];
#endif #endif
if (error) { if (error) {
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]); metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
return NULL; return NULL;
} }
} }
@ -170,11 +175,11 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
#define GGML_METAL_ADD_KERNEL(name) \ #define GGML_METAL_ADD_KERNEL(name) \
ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \ ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \ ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \
fprintf(stderr, "%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \ metal_printf("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \
(int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \ (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \
(int) ctx->pipeline_##name.threadExecutionWidth); \ (int) ctx->pipeline_##name.threadExecutionWidth); \
if (error) { \ if (error) { \
fprintf(stderr, "%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ metal_printf("%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
return NULL; \ return NULL; \
} }
@ -226,22 +231,80 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
#undef GGML_METAL_ADD_KERNEL #undef GGML_METAL_ADD_KERNEL
} }
fprintf(stderr, "%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); metal_printf("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
fprintf(stderr, "%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false"); metal_printf("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
if (ctx->device.maxTransferRate != 0) { if (ctx->device.maxTransferRate != 0) {
fprintf(stderr, "%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0); metal_printf("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
} else { } else {
fprintf(stderr, "%s: maxTransferRate = built-in GPU\n", __func__); metal_printf("%s: maxTransferRate = built-in GPU\n", __func__);
} }
return ctx; return ctx;
} }
void ggml_metal_free(struct ggml_metal_context * ctx) { void ggml_metal_free(struct ggml_metal_context * ctx) {
fprintf(stderr, "%s: deallocating\n", __func__); metal_printf("%s: deallocating\n", __func__);
#define GGML_METAL_DEL_KERNEL(name) \
[ctx->function_##name release]; \
[ctx->pipeline_##name release];
GGML_METAL_DEL_KERNEL(add);
GGML_METAL_DEL_KERNEL(add_row);
GGML_METAL_DEL_KERNEL(mul);
GGML_METAL_DEL_KERNEL(mul_row);
GGML_METAL_DEL_KERNEL(scale);
GGML_METAL_DEL_KERNEL(silu);
GGML_METAL_DEL_KERNEL(relu);
GGML_METAL_DEL_KERNEL(gelu);
GGML_METAL_DEL_KERNEL(soft_max);
GGML_METAL_DEL_KERNEL(diag_mask_inf);
GGML_METAL_DEL_KERNEL(get_rows_f16);
GGML_METAL_DEL_KERNEL(get_rows_q4_0);
GGML_METAL_DEL_KERNEL(get_rows_q4_1);
GGML_METAL_DEL_KERNEL(get_rows_q8_0);
GGML_METAL_DEL_KERNEL(get_rows_q2_K);
GGML_METAL_DEL_KERNEL(get_rows_q3_K);
GGML_METAL_DEL_KERNEL(get_rows_q4_K);
GGML_METAL_DEL_KERNEL(get_rows_q5_K);
GGML_METAL_DEL_KERNEL(get_rows_q6_K);
GGML_METAL_DEL_KERNEL(rms_norm);
GGML_METAL_DEL_KERNEL(norm);
GGML_METAL_DEL_KERNEL(mul_mat_f16_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q4_0_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q4_1_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q8_0_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q2_K_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q3_K_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q4_K_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q5_K_f32);
GGML_METAL_DEL_KERNEL(mul_mat_q6_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_f16_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q4_0_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q8_0_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q4_1_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q2_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q3_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32);
GGML_METAL_DEL_KERNEL(rope);
GGML_METAL_DEL_KERNEL(alibi_f32);
GGML_METAL_DEL_KERNEL(cpy_f32_f16);
GGML_METAL_DEL_KERNEL(cpy_f32_f32);
GGML_METAL_DEL_KERNEL(cpy_f16_f16);
#undef GGML_METAL_DEL_KERNEL
for (int i = 0; i < ctx->n_buffers; ++i) { for (int i = 0; i < ctx->n_buffers; ++i) {
[ctx->buffers[i].metal release]; [ctx->buffers[i].metal release];
} }
[ctx->library release];
[ctx->queue release];
[ctx->device release];
dispatch_release(ctx->d_queue);
free(ctx); free(ctx);
} }
@ -249,7 +312,7 @@ void * ggml_metal_host_malloc(size_t n) {
void * data = NULL; void * data = NULL;
const int result = posix_memalign((void **) &data, getpagesize(), n); const int result = posix_memalign((void **) &data, getpagesize(), n);
if (result != 0) { if (result != 0) {
fprintf(stderr, "%s: error: posix_memalign failed\n", __func__); metal_printf("%s: error: posix_memalign failed\n", __func__);
return NULL; return NULL;
} }
@ -261,7 +324,7 @@ void ggml_metal_host_free(void * data) {
} }
void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) { void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) {
ctx->n_cb = n_cb; ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
} }
int ggml_metal_if_optimized(struct ggml_metal_context * ctx) { int ggml_metal_if_optimized(struct ggml_metal_context * ctx) {
@ -277,7 +340,7 @@ int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx) {
// Metal buffer based on the host memory pointer // Metal buffer based on the host memory pointer
// //
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) { static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) {
//fprintf(stderr, "%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach); //metal_printf("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);
const int64_t tsize = ggml_nbytes(t); const int64_t tsize = ggml_nbytes(t);
@ -288,13 +351,13 @@ static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, stru
if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) { if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) {
*offs = (size_t) ioffs; *offs = (size_t) ioffs;
//fprintf(stderr, "%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs); //metal_printf("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs);
return ctx->buffers[i].metal; return ctx->buffers[i].metal;
} }
} }
fprintf(stderr, "%s: error: buffer is nil\n", __func__); metal_printf("%s: error: buffer is nil\n", __func__);
return nil; return nil;
} }
@ -306,7 +369,7 @@ bool ggml_metal_add_buffer(
size_t size, size_t size,
size_t max_size) { size_t max_size) {
if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) { if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) {
fprintf(stderr, "%s: too many buffers\n", __func__); metal_printf("%s: too many buffers\n", __func__);
return false; return false;
} }
@ -316,7 +379,7 @@ bool ggml_metal_add_buffer(
const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data; const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data;
if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) { if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) {
fprintf(stderr, "%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name); metal_printf("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name);
return false; return false;
} }
} }
@ -337,11 +400,11 @@ bool ggml_metal_add_buffer(
ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil]; ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) { if (ctx->buffers[ctx->n_buffers].metal == nil) {
fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0); metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0);
return false; return false;
} }
fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0); metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0);
++ctx->n_buffers; ++ctx->n_buffers;
} else { } else {
@ -361,27 +424,27 @@ bool ggml_metal_add_buffer(
ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil]; ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) { if (ctx->buffers[ctx->n_buffers].metal == nil) {
fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0); metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0);
return false; return false;
} }
fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i); metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i);
if (i + size_step < size) { if (i + size_step < size) {
fprintf(stderr, "\n"); metal_printf("\n");
} }
++ctx->n_buffers; ++ctx->n_buffers;
} }
} }
fprintf(stderr, ", (%8.2f / %8.2f)", metal_printf(", (%8.2f / %8.2f)",
ctx->device.currentAllocatedSize / 1024.0 / 1024.0, ctx->device.currentAllocatedSize / 1024.0 / 1024.0,
ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) { if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) {
fprintf(stderr, ", warning: current allocated size is greater than the recommended max working set size\n"); metal_printf(", warning: current allocated size is greater than the recommended max working set size\n");
} else { } else {
fprintf(stderr, "\n"); metal_printf("\n");
} }
} }
@ -391,8 +454,6 @@ bool ggml_metal_add_buffer(
void ggml_metal_set_tensor( void ggml_metal_set_tensor(
struct ggml_metal_context * ctx, struct ggml_metal_context * ctx,
struct ggml_tensor * t) { struct ggml_tensor * t) {
metal_printf("%s: set input for tensor '%s'\n", __func__, t->name);
size_t offs; size_t offs;
id<MTLBuffer> id_dst = ggml_metal_get_buffer(ctx, t, &offs); id<MTLBuffer> id_dst = ggml_metal_get_buffer(ctx, t, &offs);
@ -402,8 +463,6 @@ void ggml_metal_set_tensor(
void ggml_metal_get_tensor( void ggml_metal_get_tensor(
struct ggml_metal_context * ctx, struct ggml_metal_context * ctx,
struct ggml_tensor * t) { struct ggml_tensor * t) {
metal_printf("%s: extract results for tensor '%s'\n", __func__, t->name);
size_t offs; size_t offs;
id<MTLBuffer> id_src = ggml_metal_get_buffer(ctx, t, &offs); id<MTLBuffer> id_src = ggml_metal_get_buffer(ctx, t, &offs);
@ -498,14 +557,14 @@ void ggml_metal_graph_find_concurrency(
} }
if (ctx->concur_list_len > GGML_MAX_CONCUR) { if (ctx->concur_list_len > GGML_MAX_CONCUR) {
fprintf(stderr, "%s: too many elements for metal ctx->concur_list!\n", __func__); metal_printf("%s: too many elements for metal ctx->concur_list!\n", __func__);
} }
} }
void ggml_metal_graph_compute( void ggml_metal_graph_compute(
struct ggml_metal_context * ctx, struct ggml_metal_context * ctx,
struct ggml_cgraph * gf) { struct ggml_cgraph * gf) {
metal_printf("%s: evaluating graph\n", __func__); @autoreleasepool {
// if there is ctx->concur_list, dispatch concurrently // if there is ctx->concur_list, dispatch concurrently
// else fallback to serial dispatch // else fallback to serial dispatch
@ -521,29 +580,25 @@ void ggml_metal_graph_compute(
const int n_cb = ctx->n_cb; const int n_cb = ctx->n_cb;
NSMutableArray * command_buffers = [NSMutableArray arrayWithCapacity:n_cb];
for (int i = 0; i < n_cb; ++i) { for (int i = 0; i < n_cb; ++i) {
command_buffers[i] = [ctx->queue commandBuffer]; ctx->command_buffers[i] = [ctx->queue commandBuffer];
// enqueue the command buffers in order to specify their execution order // enqueue the command buffers in order to specify their execution order
[command_buffers[i] enqueue]; [ctx->command_buffers[i] enqueue];
}
// TODO: is this the best way to start threads? ctx->command_encoders[i] = [ctx->command_buffers[i] computeCommandEncoderWithDescriptor: edesc];
dispatch_queue_t queue = dispatch_queue_create("llama.cpp", DISPATCH_QUEUE_CONCURRENT); }
for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) { for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb; const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb;
dispatch_async(queue, ^{ dispatch_async(ctx->d_queue, ^{
size_t offs_src0 = 0; size_t offs_src0 = 0;
size_t offs_src1 = 0; size_t offs_src1 = 0;
size_t offs_dst = 0; size_t offs_dst = 0;
id<MTLCommandBuffer> command_buffer = command_buffers[cb_idx]; id<MTLCommandBuffer> command_buffer = ctx->command_buffers[cb_idx];
id<MTLComputeCommandEncoder> encoder = ctx->command_encoders[cb_idx];
id<MTLComputeCommandEncoder> encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
const int node_start = (cb_idx + 0) * n_nodes_per_cb; const int node_start = (cb_idx + 0) * n_nodes_per_cb;
const int node_end = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes); const int node_end = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes);
@ -556,7 +611,7 @@ void ggml_metal_graph_compute(
continue; continue;
} }
metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op)); //metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));
struct ggml_tensor * src0 = gf->nodes[i]->src[0]; struct ggml_tensor * src0 = gf->nodes[i]->src[0];
struct ggml_tensor * src1 = gf->nodes[i]->src[1]; struct ggml_tensor * src1 = gf->nodes[i]->src[1];
@ -704,7 +759,7 @@ void ggml_metal_graph_compute(
} break; } break;
default: default:
{ {
fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false); GGML_ASSERT(false);
} }
} break; } break;
@ -863,7 +918,7 @@ void ggml_metal_graph_compute(
} break; } break;
default: default:
{ {
fprintf(stderr, "Asserting on type %d\n",(int)src0t); metal_printf("Asserting on type %d\n",(int)src0t);
GGML_ASSERT(false && "not implemented"); GGML_ASSERT(false && "not implemented");
} }
}; };
@ -1101,7 +1156,7 @@ void ggml_metal_graph_compute(
} break; } break;
default: default:
{ {
fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false); GGML_ASSERT(false);
} }
} }
@ -1117,17 +1172,19 @@ void ggml_metal_graph_compute(
} }
// wait for all threads to finish // wait for all threads to finish
dispatch_barrier_sync(queue, ^{}); dispatch_barrier_sync(ctx->d_queue, ^{});
[command_buffers[n_cb - 1] waitUntilCompleted];
// check status of command buffers // check status of command buffers
// needed to detect if the device ran out-of-memory for example (#1881) // needed to detect if the device ran out-of-memory for example (#1881)
for (int i = 0; i < n_cb; i++) { for (int i = 0; i < n_cb; i++) {
MTLCommandBufferStatus status = (MTLCommandBufferStatus) [command_buffers[i] status]; [ctx->command_buffers[i] waitUntilCompleted];
MTLCommandBufferStatus status = (MTLCommandBufferStatus) [ctx->command_buffers[i] status];
if (status != MTLCommandBufferStatusCompleted) { if (status != MTLCommandBufferStatusCompleted) {
fprintf(stderr, "%s: command buffer %d failed with status %lu\n", __func__, i, status); metal_printf("%s: command buffer %d failed with status %lu\n", __func__, i, status);
GGML_ASSERT(false); GGML_ASSERT(false);
} }
} }
}
} }

368 ggml.c
View file

@ -123,6 +123,8 @@ typedef void * thread_ret_t;
#define GGML_GELU_FP16 #define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16 #define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16 #define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16
#define GGML_SOFT_MAX_UNROLL 4 #define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL 2 #define GGML_VEC_DOT_UNROLL 2
@ -157,12 +159,6 @@ typedef void * thread_ret_t;
//#define GGML_SOFT_MAX_ACCELERATE //#define GGML_SOFT_MAX_ACCELERATE
#endif #endif
#if UINTPTR_MAX == 0xFFFFFFFF
#define GGML_MEM_ALIGN 4
#else
#define GGML_MEM_ALIGN 16
#endif
// //
// logging // logging
// //
@ -192,8 +188,8 @@ typedef void * thread_ret_t;
// //
#if defined(_MSC_VER) || defined(__MINGW32__) #if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN) #define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr) #define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr)
#else #else
inline static void * ggml_aligned_malloc(size_t size) { inline static void * ggml_aligned_malloc(size_t size) {
void * aligned_memory = NULL; void * aligned_memory = NULL;
@ -218,8 +214,8 @@ inline static void * ggml_aligned_malloc(size_t size) {
} }
return aligned_memory; return aligned_memory;
} }
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size) #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#define GGML_ALIGNED_FREE(ptr) free(ptr) #define GGML_ALIGNED_FREE(ptr) free(ptr)
#endif #endif
#define UNUSED GGML_UNUSED #define UNUSED GGML_UNUSED
@ -2436,7 +2432,6 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void *
const int nb = n / qk; const int nb = n / qk;
assert(n % qk == 0); assert(n % qk == 0);
assert(nb % 2 == 0);
const block_q4_0 * restrict x = vx; const block_q4_0 * restrict x = vx;
const block_q8_0 * restrict y = vy; const block_q8_0 * restrict y = vy;
@ -2445,6 +2440,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void *
float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv0 = vdupq_n_f32(0.0f);
float32x4_t sumv1 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f);
GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
for (int i = 0; i < nb; i += 2) { for (int i = 0; i < nb; i += 2) {
const block_q4_0 * restrict x0 = &x[i + 0]; const block_q4_0 * restrict x0 = &x[i + 0];
const block_q4_0 * restrict x1 = &x[i + 1]; const block_q4_0 * restrict x1 = &x[i + 1];
@ -2623,6 +2619,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void *
} }
// Main loop // Main loop
GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
for (int i = 2; i < nb; i+=2) { for (int i = 2; i < nb; i+=2) {
_mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0); _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
_mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0); _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
@ -2706,7 +2703,6 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void *
const int nb = n / qk; const int nb = n / qk;
assert(n % qk == 0); assert(n % qk == 0);
assert(nb % 2 == 0);
const block_q4_1 * restrict x = vx; const block_q4_1 * restrict x = vx;
const block_q8_1 * restrict y = vy; const block_q8_1 * restrict y = vy;
@ -2718,6 +2714,7 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void *
float summs = 0; float summs = 0;
GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
for (int i = 0; i < nb; i += 2) { for (int i = 0; i < nb; i += 2) {
const block_q4_1 * restrict x0 = &x[i + 0]; const block_q4_1 * restrict x0 = &x[i + 0];
const block_q4_1 * restrict x1 = &x[i + 1]; const block_q4_1 * restrict x1 = &x[i + 1];
@ -2832,7 +2829,6 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void *
const int nb = n / qk; const int nb = n / qk;
assert(n % qk == 0); assert(n % qk == 0);
assert(nb % 2 == 0);
assert(qk == QK5_0); assert(qk == QK5_0);
const block_q5_0 * restrict x = vx; const block_q5_0 * restrict x = vx;
@ -2848,6 +2844,7 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void *
uint64_t tmp0[4]; uint64_t tmp0[4];
uint64_t tmp1[4]; uint64_t tmp1[4];
GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
for (int i = 0; i < nb; i += 2) { for (int i = 0; i < nb; i += 2) {
const block_q5_0 * restrict x0 = &x[i]; const block_q5_0 * restrict x0 = &x[i];
const block_q5_0 * restrict x1 = &x[i + 1]; const block_q5_0 * restrict x1 = &x[i + 1];
@ -3072,7 +3069,6 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void *
const int nb = n / qk; const int nb = n / qk;
assert(n % qk == 0); assert(n % qk == 0);
assert(nb % 2 == 0);
assert(qk == QK5_1); assert(qk == QK5_1);
const block_q5_1 * restrict x = vx; const block_q5_1 * restrict x = vx;
@ -3091,6 +3087,7 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void *
uint64_t tmp0[4]; uint64_t tmp0[4];
uint64_t tmp1[4]; uint64_t tmp1[4];
GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
for (int i = 0; i < nb; i += 2) { for (int i = 0; i < nb; i += 2) {
const block_q5_1 * restrict x0 = &x[i]; const block_q5_1 * restrict x0 = &x[i];
const block_q5_1 * restrict x1 = &x[i + 1]; const block_q5_1 * restrict x1 = &x[i + 1];
@ -3328,7 +3325,6 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void *
const int nb = n / qk; const int nb = n / qk;
assert(n % qk == 0); assert(n % qk == 0);
assert(nb % 2 == 0);
const block_q8_0 * restrict x = vx; const block_q8_0 * restrict x = vx;
const block_q8_0 * restrict y = vy; const block_q8_0 * restrict y = vy;
@ -3337,6 +3333,7 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void *
float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv0 = vdupq_n_f32(0.0f);
float32x4_t sumv1 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f);
GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
for (int i = 0; i < nb; i += 2) { for (int i = 0; i < nb; i += 2) {
const block_q8_0 * restrict x0 = &x[i + 0]; const block_q8_0 * restrict x0 = &x[i + 0];
const block_q8_0 * restrict x1 = &x[i + 1]; const block_q8_0 * restrict x1 = &x[i + 1];
@ -5862,7 +5859,8 @@ struct ggml_tensor * ggml_rms_norm_inplace(
struct ggml_tensor * ggml_rms_norm_back( struct ggml_tensor * ggml_rms_norm_back(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_tensor * a, struct ggml_tensor * a,
struct ggml_tensor * b) { struct ggml_tensor * b,
float eps) {
bool is_node = false; bool is_node = false;
if (a->grad) { if (a->grad) {
@ -5872,6 +5870,8 @@ struct ggml_tensor * ggml_rms_norm_back(
struct ggml_tensor * result = ggml_dup_tensor(ctx, a); struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
ggml_set_op_params(result, &eps, sizeof(eps));
result->op = GGML_OP_RMS_NORM_BACK; result->op = GGML_OP_RMS_NORM_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a; result->src[0] = a;
@ -7097,11 +7097,13 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0(
}; };
struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
ggml_set_op_params_i32(result, 0, stride);
result->op = GGML_OP_CONV_TRANSPOSE_2D; result->op = GGML_OP_CONV_TRANSPOSE_2D;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a; result->src[0] = a;
result->src[1] = b; result->src[1] = b;
result->src[2] = ggml_new_i32(ctx, stride);
return result; return result;
} }
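Both hunks above adopt the same new mechanism: small scalar hyper-parameters (the eps of ggml_rms_norm_back, the stride of the transposed convolution) are stored inline in the result tensor's op_params buffer instead of being carried in an extra source tensor. A minimal sketch of the two halves of that pattern, using only helpers visible in this diff (illustrative fragments, not a new API):

    // graph construction: stash the scalar directly on the result tensor
    ggml_set_op_params(result, &eps, sizeof(eps));   // raw bytes, e.g. a float
    ggml_set_op_params_i32(result, 0, stride);       // or an indexed int32 slot

    // compute kernel: read it back from dst, no src[2] lookup required
    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));
    const int32_t stride = ggml_get_op_params_i32(dst, 0);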
@ -9446,6 +9448,8 @@ static void ggml_compute_forward_div_f32(
#ifdef GGML_USE_ACCELERATE #ifdef GGML_USE_ACCELERATE
UNUSED(ggml_vec_div_f32);
vDSP_vdiv( vDSP_vdiv(
(float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1, (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
(float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1, (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
@ -10752,7 +10756,8 @@ static void ggml_compute_forward_rms_norm_back_f32(
GGML_TENSOR_BINARY_OP_LOCALS; GGML_TENSOR_BINARY_OP_LOCALS;
const float eps = 1e-6f; // TODO: make this a parameter float eps;
memcpy(&eps, dst->op_params, sizeof(float));
// TODO: optimize // TODO: optimize
for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i03 = 0; i03 < ne03; i03++) {
@ -12142,6 +12147,7 @@ static void ggml_compute_forward_soft_max_back_f32(
// dx = J * dy // dx = J * dy
// dxk = sum_i(Jki * dyi) // dxk = sum_i(Jki * dyi)
// dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
// dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
// dxk = sum_i(-yk*yi * dyi) + yk*dyk // dxk = sum_i(-yk*yi * dyi) + yk*dyk
// dxk = -yk * sum_i(yi * dyi) + yk*dyk // dxk = -yk * sum_i(yi * dyi) + yk*dyk
// dxk = -yk * dot(y, dy) + yk*dyk // dxk = -yk * dot(y, dy) + yk*dyk
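Restating the comment chain above in one step: with $y = \mathrm{softmax}(x)$ the Jacobian is $J_{ki} = y_k(\delta_{ki} - y_i)$, so

$$ dx_k = \sum_i J_{ki}\, dy_i = y_k\, dy_k - y_k \sum_i y_i\, dy_i = y_k\,(dy_k - y \cdot dy), $$

which is the closed form the last comment line states.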
@ -13497,7 +13503,6 @@ static void ggml_compute_forward_conv_transpose_2d(
const struct ggml_compute_params * params, const struct ggml_compute_params * params,
const struct ggml_tensor * src0, const struct ggml_tensor * src0,
const struct ggml_tensor * src1, const struct ggml_tensor * src1,
const struct ggml_tensor * opt0,
struct ggml_tensor * dst) { struct ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32);
@ -13557,7 +13562,7 @@ static void ggml_compute_forward_conv_transpose_2d(
return; return;
} }
const int32_t stride = ((const int32_t*)(opt0->data))[0]; const int32_t stride = ggml_get_op_params_i32(dst, 0);
// total patches in dst // total patches in dst
const int np = ne2; const int np = ne2;
@ -13570,7 +13575,7 @@ static void ggml_compute_forward_conv_transpose_2d(
const int ip1 = MIN(ip0 + dp, np); const int ip1 = MIN(ip0 + dp, np);
ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
ggml_fp16_t * const wdata_src = (ggml_fp16_t *) params->wdata + nk; ggml_fp16_t * const wdata_src = wdata + nk;
for (int i2 = ip0; i2 < ip1; i2++) { // Cout for (int i2 = ip0; i2 < ip1; i2++) { // Cout
float * dst_data = (float *)((char *) dst->data + i2*nb2); float * dst_data = (float *)((char *) dst->data + i2*nb2);
@ -13582,9 +13587,8 @@ static void ggml_compute_forward_conv_transpose_2d(
for (int i00 = 0; i00 < ne00; i00++) { for (int i00 = 0; i00 < ne00; i00++) {
float v = 0; float v = 0;
ggml_vec_dot_f16(ne03, &v, ggml_vec_dot_f16(ne03, &v,
(ggml_fp16_t *) wdata_src + i1n, wdata_src + i1n,
(ggml_fp16_t *) wdata_kernel + i01*ne00*ne03 + i00*ne03); wdata_kernel + i01*ne00*ne03 + i00*ne03);
dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v; dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
} }
} }
@ -13934,7 +13938,7 @@ static void ggml_compute_forward_flash_attn_f32(
vvexpf(S, S, &Mup); vvexpf(S, S, &Mup);
ggml_vec_sum_f32(Mup, &sum, S); ggml_vec_sum_f32(Mup, &sum, S);
#else #else
uint16_t scvt[GGML_SOFT_MAX_UNROLL]; uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
@ -13944,9 +13948,13 @@ static void ggml_compute_forward_flash_attn_f32(
if (SS[j] == -INFINITY) { if (SS[j] == -INFINITY) {
SS[j] = 0.0f; SS[j] = 0.0f;
} else { } else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
const float val = expf(SS[j] - max);
#else
ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
memcpy(&scvt[j], &s, sizeof(uint16_t)); memcpy(&scvt[j], &s, sizeof(uint16_t));
const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
#endif
sump[j] += (ggml_float)val; sump[j] += (ggml_float)val;
SS[j] = val; SS[j] = val;
} }
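A short standalone restatement of the two exp paths toggled above (the wrapper name exp_approx is illustrative; everything else appears in the surrounding code):

    static inline float exp_approx(float x) {
    #ifndef GGML_FLASH_ATTN_EXP_FP16
        return expf(x);                                  // full-precision path (the new default)
    #else
        ggml_fp16_t s = GGML_FP32_TO_FP16(x);            // quantize the argument to fp16
        uint16_t scvt;
        memcpy(&scvt, &s, sizeof(scvt));                 // reuse its bit pattern as a table index
        return GGML_FP16_TO_FP32(table_exp_f16[scvt]);   // precomputed exp lookup
    #endif
    }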
@ -14524,7 +14532,7 @@ static void ggml_compute_forward_flash_attn_back_f32(
vvexpf(SM, SM, &Mup); vvexpf(SM, SM, &Mup);
ggml_vec_sum_f32(Mup, &sum, SM); ggml_vec_sum_f32(Mup, &sum, SM);
#else #else
uint16_t scvt[GGML_SOFT_MAX_UNROLL]; uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
@ -14535,9 +14543,13 @@ static void ggml_compute_forward_flash_attn_back_f32(
if (SR[j] == -INFINITY) { if (SR[j] == -INFINITY) {
SW[j] = 0.0f; SW[j] = 0.0f;
} else { } else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
const float val = expf(SR[j] - max);
#else
ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max); ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
memcpy(&scvt[j], &s, sizeof(uint16_t)); memcpy(&scvt[j], &s, sizeof(uint16_t));
const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
#endif
sump[j] += (ggml_float)val; sump[j] += (ggml_float)val;
SW[j] = val; SW[j] = val;
} }
@ -15275,6 +15287,8 @@ static void ggml_compute_forward_cross_entropy_loss_f32(
const int nc = src0->ne[0]; const int nc = src0->ne[0];
const int nr = ggml_nrows(src0); const int nr = ggml_nrows(src0);
GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));
if (params->type == GGML_TASK_INIT) { if (params->type == GGML_TASK_INIT) {
if (ith == 0) { if (ith == 0) {
memset(sums, 0, sizeof(float) * (nth + nth * nc)); memset(sums, 0, sizeof(float) * (nth + nth * nc));
@ -15286,7 +15300,7 @@ static void ggml_compute_forward_cross_entropy_loss_f32(
if (ith == 0) { if (ith == 0) {
float * dp = (float *) dst->data; float * dp = (float *) dst->data;
ggml_vec_sum_f32(nth, dp, sums); ggml_vec_sum_f32(nth, dp, sums);
dp[0] *= -1.0f; dp[0] *= -1.0f / (float) nr;
} }
return; return;
} }
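With the extra 1/nr factor above, the reduced scalar is now the mean rather than the sum of the per-row losses; written out (per the surrounding code, with a small $\varepsilon$ mixed in to keep the log finite):

$$ L = -\frac{1}{n_r} \sum_{\text{rows}} \sum_i t_i \,\log\!\big(\varepsilon + (1-\varepsilon)\,\mathrm{softmax}(s)_i\big), $$

where $s$ is a row of src0 and $t$ the matching row of src1.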
@ -15303,7 +15317,7 @@ static void ggml_compute_forward_cross_entropy_loss_f32(
for (int i1 = ir0; i1 < ir1; i1++) { for (int i1 = ir0; i1 < ir1; i1++) {
float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]); float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]); float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
float * st = (float *) params->wdata + nth + ith*nc; float * st = ((float *) params->wdata) + nth + ith*nc;
#ifndef NDEBUG #ifndef NDEBUG
for (int i = 0; i < nc; ++i) { for (int i = 0; i < nc; ++i) {
@ -15318,15 +15332,19 @@ static void ggml_compute_forward_cross_entropy_loss_f32(
float max = -INFINITY; float max = -INFINITY;
ggml_vec_max_f32(nc, &max, s0); ggml_vec_max_f32(nc, &max, s0);
uint16_t scvt; uint16_t scvt; UNUSED(scvt);
for (int i = 0; i < nc; i++) { for (int i = 0; i < nc; i++) {
if (s0[i] == -INFINITY) { if (s0[i] == -INFINITY) {
st[i] = 0.0f; st[i] = 0.0f;
} else { } else {
// const float val = (s0[i] == -INFINITY) ? 0.0 : exp(s0[i] - max); #ifndef GGML_CROSS_ENTROPY_EXP_FP16
const float s = s0[i] - max;
const float val = expf(s);
#else
ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
memcpy(&scvt, &s, sizeof(scvt)); memcpy(&scvt, &s, sizeof(scvt));
const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
#endif
sum += (ggml_float)val; sum += (ggml_float)val;
st[i] = val; st[i] = val;
} }
@ -15342,7 +15360,9 @@ static void ggml_compute_forward_cross_entropy_loss_f32(
ggml_vec_log_f32(nc, st, st); ggml_vec_log_f32(nc, st, st);
ggml_vec_mul_f32(nc, st, st, s1); ggml_vec_mul_f32(nc, st, st, s1);
ggml_vec_sum_f32(nc, sums + ith, st); float st_sum = 0;
ggml_vec_sum_f32(nc, &st_sum, st);
sums[ith] += st_sum;
#ifndef NDEBUG #ifndef NDEBUG
for (int i = 0; i < nc; ++i) { for (int i = 0; i < nc; ++i) {
@ -15392,7 +15412,7 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
return; return;
} }
const float eps = 1e-9f; const double eps = 1e-9;
// TODO: handle transposed/permuted matrices // TODO: handle transposed/permuted matrices
const int64_t nc = src0->ne[0]; const int64_t nc = src0->ne[0];
@ -15411,7 +15431,6 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]); float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]); float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]); float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
float * sm = (float *) params->wdata + ith*nc;
#ifndef NDEBUG #ifndef NDEBUG
for (int i = 0; i < nc; ++i) { for (int i = 0; i < nc; ++i) {
@ -15420,54 +15439,6 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
assert(!isnan(s1[i])); assert(!isnan(s1[i]));
} }
#endif #endif
// step by step explanation:
{
//float * sums = (float *) params->wdata;
// forward pass with annotated gradients from backward pass
// (built by going in reverse operation order, adding to gradients of current operation args)
// st0 = exp(s0-max(s0)) grad[st0] = grad[st1]*(1.0 - eps)/sum
// from softmax_back: grad[s0] = st1_k * (grad[st1]_k - dot(st1, grad[st1]))
// ggml_vec_scale_f32(nc, st, sum); // st1 = st0*/sum = softmax(s0) grad[st1] = grad[st2]*(1.0 - eps)
// ggml_vec_scale_f32(nc, st, (1.0f - eps)); // st2 = st1*(1.0 - eps) grad[st2] = grad[st3]
// ggml_vec_add1_f32(nc, st, st, eps); // st3 = st2 + eps grad[st3] = grad[st4]/st3
// ggml_vec_log_f32(nc, st, st); // st4 = log(st3) grad[st4] = grad[st5] * s1
// ggml_vec_mul_f32(nc, st, st, s1); // st5 = st4 * s1 grad[st5] = grad[sums[ith]]
// ggml_vec_sum_f32(nc, sums + ith, st); // sums[ith] = st5 grad[sums[ith]] = grad[cross_entropy_loss] = -grad[cel]
// substitute into grad[st1], because we can reuse softmax_back from this point on
// grad[st1] = -grad[cel]*s1*(1.0 - eps)/(eps + softmax(s0)*(1.0 - eps))
// postorder:
// grad[st1] := softmax(s0)
// grad[st1] := grad[st1]*(1.0 - eps)
// grad[st1] := grad[st1] + eps
// grad[st1] := s1 / grad[st1]
// grad[st1] := grad[st1]*(1.0-eps)*-grad[cel]
// src0 gradients by going through softmax_back
// grad[s0] = st1_k * (grad[st1]_k - dot(st1, grad[st1]))
// from softmax_back:
// dxk = yk * (dyk - dot(y, dy))
// dot_y_dy := dot(y, dy)
// dx := dy
// dx := dx - dot_y_dy
// dx := dx * y
// postorder:
// dot_st1_dst1 := dot(st1, grad[st1])
// grad[s0] := grad[st1]
// grad[s0] := grad[s0] - dot_st1_dst1
// grad[s0] := grad[s0] * st1
// prepend postorder from grad[st1] directly using grad[s0] as memory location, as we will grad[s0] := grad[st1]
// sm := softmax(s0)
// grad[s0] := sm*(1.0 - eps)
// grad[s0] := grad[s0] + eps
// grad[s0] := s1 / grad[s0]
// grad[s0] := grad[s0]*(1.0-eps)*-grad[cel]
// dot_st1_dst1 := dot(sm, grad[s0])
// grad[s0] := grad[s0] - dot_st1_dst1
// grad[s0] := grad[s0] * sm
}
// soft_max // soft_max
ggml_float sum = 0.0; ggml_float sum = 0.0;
@ -15475,39 +15446,37 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
float max = -INFINITY; float max = -INFINITY;
ggml_vec_max_f32(nc, &max, s0); ggml_vec_max_f32(nc, &max, s0);
uint16_t scvt; uint16_t scvt; UNUSED(scvt);
for (int i = 0; i < nc; i++) { for (int i = 0; i < nc; i++) {
if (s0[i] == -INFINITY) { if (s0[i] == -INFINITY) {
sm[i] = 0.0f; ds0[i] = 0.0f;
} else { } else {
// const float val = (s0[i] == -INFINITY) ? 0.0 : exp(s0[i] - max); #ifndef GGML_CROSS_ENTROPY_EXP_FP16
const float s = s0[i] - max;
const float val = expf(s);
#else
ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
memcpy(&scvt, &s, sizeof(scvt)); memcpy(&scvt, &s, sizeof(scvt));
const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
#endif
sum += (ggml_float)val; sum += (ggml_float)val;
sm[i] = val; ds0[i] = val;
} }
} }
assert(sum > 0.0); assert(sum > 0.0);
sum = 1.0/sum; sum = (1.0 - eps)/sum;
} }
float dot_st1_dst1 = 0; // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
ggml_vec_scale_f32(nc, sm, sum); ggml_vec_scale_f32(nc, ds0, sum);
ggml_vec_cpy_f32 (nc, ds0, sm); ggml_vec_add1_f32(nc, ds0, ds0, eps);
ggml_vec_scale_f32(nc, ds0, (1.0f - eps)); ggml_vec_sub_f32(nc, ds0, ds0, s1);
ggml_vec_add1_f32 (nc, ds0, ds0, eps); ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
ggml_vec_div_f32 (nc, ds0, s1, ds0);
ggml_vec_scale_f32(nc, ds0, -(1.0f - eps)*d[0]);
ggml_vec_dot_f32 (nc, &dot_st1_dst1, sm, ds0);
ggml_vec_acc1_f32 (nc, ds0, -dot_st1_dst1);
ggml_vec_mul_f32 (nc, ds0, ds0, sm);
#ifndef NDEBUG #ifndef NDEBUG
for (int i = 0; i < nc; ++i) { for (int i = 0; i < nc; ++i) {
assert(!isnan(sm[i]));
assert(!isinf(sm[i]));
assert(!isnan(ds0[i])); assert(!isnan(ds0[i]));
assert(!isinf(ds0[i])); assert(!isinf(ds0[i]));
} }
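A one-line justification of the new gradient comment: for a row with $p = \mathrm{softmax}(s)$ and targets $t$ satisfying $\sum_i t_i = 1$,

$$ \frac{\partial}{\partial s_k}\Big(-\sum_i t_i \log p_i\Big) = p_k - t_k, $$

so after averaging over the $n_r$ rows and chaining with the incoming gradient $d$, $\partial L / \partial s_k = (p_k - t_k)\, d / n_r$, which is exactly what ds0 is assembled to hold above, using the same $\varepsilon$-smoothed $p$ as the forward pass.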
@ -15731,7 +15700,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
} break; } break;
case GGML_OP_CONV_TRANSPOSE_2D: case GGML_OP_CONV_TRANSPOSE_2D:
{ {
ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor); ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
} break; } break;
case GGML_OP_POOL_1D: case GGML_OP_POOL_1D:
{ {
@ -16062,9 +16031,12 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
{ {
// necessary for llama // necessary for llama
if (src0->grad) { if (src0->grad) {
float eps;
memcpy(&eps, tensor->op_params, sizeof(float));
src0->grad = ggml_add_impl(ctx, src0->grad = ggml_add_impl(ctx,
src0->grad, src0->grad,
ggml_rms_norm_back(ctx, src0, tensor->grad), ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
inplace); inplace);
} }
} break; } break;
@ -16832,9 +16804,7 @@ struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
return result; return result;
} }
struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) { void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
struct ggml_cgraph result = *gf;
GGML_ASSERT(gf->n_nodes > 0); GGML_ASSERT(gf->n_nodes > 0);
// if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
@ -16858,15 +16828,19 @@ struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cg
} }
} }
for (int i = gf->n_nodes - 1; i >= 0; i--) { for (int i = 0; i < gf->n_nodes; i++) {
struct ggml_tensor * node = gf->nodes[i]; struct ggml_tensor * node = gf->nodes[i];
if (node->is_param) { if (node->is_param) {
GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node); GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
ggml_build_forward_expand(&result, node->grad); ggml_build_forward_expand(gb, node->grad);
} }
} }
}
struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
struct ggml_cgraph result = *gf;
ggml_build_backward_expand(ctx, gf, &result, keep);
return result; return result;
} }
@ -17542,10 +17516,6 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
case GGML_OP_CROSS_ENTROPY_LOSS_BACK: case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
{ {
n_tasks = n_threads; n_tasks = n_threads;
size_t cur = ggml_type_size(node->type)*node->src[0]->ne[0]*n_tasks;
work_size = MAX(work_size, cur);
} break; } break;
case GGML_OP_NONE: case GGML_OP_NONE:
{ {
@ -18423,14 +18393,16 @@ static enum ggml_opt_result ggml_opt_adam(
struct ggml_opt_params params, struct ggml_opt_params params,
struct ggml_tensor * f, struct ggml_tensor * f,
struct ggml_cgraph * gf, struct ggml_cgraph * gf,
struct ggml_cgraph * gb) { struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data) {
GGML_ASSERT(ggml_is_scalar(f)); GGML_ASSERT(ggml_is_scalar(f));
// these will store the parameters we want to optimize // these will store the parameters we want to optimize
struct ggml_tensor * ps[GGML_MAX_PARAMS]; struct ggml_tensor * ps[GGML_MAX_PARAMS];
int np = 0; int np = 0;
int nx = 0; int64_t nx = 0;
for (int i = 0; i < gf->n_nodes; ++i) { for (int i = 0; i < gf->n_nodes; ++i) {
if (gf->nodes[i]->is_param) { if (gf->nodes[i]->is_param) {
GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
@ -18449,31 +18421,32 @@ static enum ggml_opt_result ggml_opt_adam(
} }
// constants // constants
const float sched = params.adam.sched; float sched = params.adam.sched;
const float decay = params.adam.decay * sched; const float alpha = params.adam.alpha;
const float alpha = params.adam.alpha * sched; const float decay = params.adam.decay * alpha;
const float beta1 = params.adam.beta1; const float beta1 = params.adam.beta1;
const float beta2 = params.adam.beta2; const float beta2 = params.adam.beta2;
const float eps = params.adam.eps; const float eps = params.adam.eps;
const float gclip = params.adam.gclip;
const int decay_min_ndim = params.adam.decay_min_ndim;
float * x = opt->adam.x->data; // view of the parameters
float * g1 = opt->adam.g1->data; // gradient
float * g2 = opt->adam.g2->data; // gradient squared
float * m = opt->adam.m->data; // first moment float * m = opt->adam.m->data; // first moment
float * v = opt->adam.v->data; // second moment float * v = opt->adam.v->data; // second moment
float * mh = opt->adam.mh->data; // first moment hat
float * vh = opt->adam.vh->data; // second moment hat
float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
// update view if (callback) {
ggml_opt_get_params(np, ps, x); callback(callback_data, &sched);
}
// compute the function value // compute the function value
ggml_graph_reset (gf); ggml_graph_reset (gf);
ggml_set_f32 (f->grad, 1.0f); ggml_set_f32 (f->grad, 1.0f);
ggml_graph_compute_with_ctx(ctx, gb, params.n_threads); struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
ggml_graph_compute(gb, &cplan);
opt->adam.fx_prev = ggml_get_f32_1d(f, 0); opt->adam.fx_prev = ggml_get_f32_1d(f, 0);
opt->adam.fx_best = opt->adam.fx_prev; opt->adam.fx_best = opt->adam.fx_prev;
@ -18481,6 +18454,9 @@ static enum ggml_opt_result ggml_opt_adam(
pf[opt->iter % params.past] = opt->adam.fx_prev; pf[opt->iter % params.past] = opt->adam.fx_prev;
} }
opt->loss_before = opt->adam.fx_prev;
opt->loss_after = opt->adam.fx_prev;
// initialize // initialize
if (opt->just_initialized) { if (opt->just_initialized) {
opt->adam.n_no_improvement = 0; opt->adam.n_no_improvement = 0;
@ -18513,50 +18489,55 @@ static enum ggml_opt_result ggml_opt_adam(
UNUSED(t_start_cpu); UNUSED(t_start_cpu);
{ {
// update the gradient float gnorm = 1.0f;
ggml_opt_get_grad(np, ps, g1); if (gclip > 0.0f) {
// gradient clipping
ggml_float sum = 0.0;
for (int p = 0; p < np; ++p) {
const int64_t ne = ggml_nelements(ps[p]);
for (int64_t j = 0; j < ne; ++j) {
float g = ggml_get_f32_1d(ps[p]->grad, j);
sum += (ggml_float)(g*g);
}
}
ggml_float norm = sqrt(sum);
if (norm > (ggml_float) gclip) {
gnorm = (float) ((ggml_float) gclip / norm);
}
}
const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
const float beta2h = 1.0f/(1.0f - powf(beta2, opt->iter));
int64_t i = 0;
for (int p = 0; p < np; ++p) {
const int64_t ne = ggml_nelements(ps[p]);
const float p_decay = ((ps[p]->n_dims >= decay_min_ndim) ? decay : 0.0f) * sched;
for (int64_t j = 0; j < ne; ++j) {
float x = ggml_get_f32_1d(ps[p], j);
float g = ggml_get_f32_1d(ps[p]->grad, j)*gnorm;
m[i] = m[i]*beta1 + g*(1.0f - beta1);
v[i] = v[i]*beta2 + g*g*(1.0f - beta2);
float mh = m[i]*beta1h;
float vh = v[i]*beta2h;
vh = sqrtf(vh) + eps;
x = x*(1.0f - p_decay) - mh/vh;
ggml_set_f32_1d(ps[p], j, x);
++i;
}
}
}
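The new loop implements AdamW with optional global-norm gradient clipping; written out, with $t$ = opt->iter and $\hat g = g \cdot \min(1,\, c_{\mathrm{clip}} / \lVert g \rVert_2)$ (the gnorm factor above):

$$ m \leftarrow \beta_1 m + (1-\beta_1)\,\hat g, \qquad v \leftarrow \beta_2 v + (1-\beta_2)\,\hat g^2, $$
$$ x \leftarrow x\,(1 - \lambda_p\,\mathrm{sched}) \;-\; \frac{\alpha\,\mathrm{sched}}{1-\beta_1^{\,t}} \cdot \frac{m}{\sqrt{v/(1-\beta_2^{\,t})} + \varepsilon}, $$

where $\lambda_p = \mathrm{decay}$ only for tensors with at least decay_min_ndim dimensions (p_decay above) and $0$ otherwise.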
// m_t = beta1*m_t-1 + (1 - beta1)*g_t if (callback) {
ggml_vec_scale_f32(nx, m, beta1); callback(callback_data, &sched);
ggml_vec_mad_f32 (nx, m, g1, 1.0f - beta1);
// g2 = g1^2
ggml_vec_sqr_f32 (nx, g2, g1);
// v_t = beta2*v_t-1 + (1 - beta2)*g_t^2
ggml_vec_scale_f32(nx, v, beta2);
ggml_vec_mad_f32 (nx, v, g2, 1.0f - beta2);
// m^hat = m_t / (1 - beta1^t)
// v^hat = v_t / (1 - beta2^t)
// x_t = x_t-1 - sched*(alpha*m^hat/(sqrt(v^hat) + eps) + decay*x_t-1)
// x_t = x_t-1 - sched*alpha*m^hat/(sqrt(v^hat) + eps) - sched*decay*x_t-1
// x_t = x_t-1*(1-sched*decay) - sched*alpha*m^hat/(sqrt(v^hat) + eps)
// x_t = x_t-1*(1-sched*decay) + sched*decay*(-alpha/decay)*m^hat/(sqrt(v^hat) + eps)
// x_t = mix(x_t-1, (-alpha/decay)*m^hat/(sqrt(v^hat) + eps), sched*decay)
ggml_vec_cpy_f32 (nx, mh, m);
ggml_vec_cpy_f32 (nx, vh, v);
ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, opt->iter)));
ggml_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, opt->iter)));
ggml_vec_sqrt_f32 (nx, vh, vh);
ggml_vec_acc1_f32 (nx, vh, eps);
ggml_vec_div_f32 (nx, mh, mh, vh);
ggml_vec_scale_f32(nx, x, 1.0f - decay);
ggml_vec_sub_f32 (nx, x, x, mh);
// update the parameters
ggml_opt_set_params(np, ps, x);
} }
ggml_graph_reset (gf); ggml_graph_reset (gf);
ggml_set_f32 (f->grad, 1.0f); ggml_set_f32 (f->grad, 1.0f);
ggml_graph_compute_with_ctx(ctx, gb, params.n_threads); ggml_graph_compute(gb, &cplan);
const float fx = ggml_get_f32_1d(f, 0); const float fx = ggml_get_f32_1d(f, 0);
opt->loss_after = fx;
// check convergence // check convergence
if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) { if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
@ -18625,7 +18606,6 @@ struct ggml_lbfgs_iteration_data {
}; };
static enum ggml_opt_result linesearch_backtracking( static enum ggml_opt_result linesearch_backtracking(
struct ggml_context * ctx,
const struct ggml_opt_params * params, const struct ggml_opt_params * params,
int nx, int nx,
float * x, float * x,
@ -18637,8 +18617,11 @@ static enum ggml_opt_result linesearch_backtracking(
struct ggml_tensor * f, struct ggml_tensor * f,
struct ggml_cgraph * gf, struct ggml_cgraph * gf,
struct ggml_cgraph * gb, struct ggml_cgraph * gb,
struct ggml_cplan * cplan,
const int np, const int np,
struct ggml_tensor * ps[]) { struct ggml_tensor * ps[],
ggml_opt_callback callback,
void * callback_data) {
int count = 0; int count = 0;
float width = 0.0f; float width = 0.0f;
@ -18667,6 +18650,12 @@ static enum ggml_opt_result linesearch_backtracking(
dgtest = params->lbfgs.ftol*dginit; dgtest = params->lbfgs.ftol*dginit;
while (true) { while (true) {
if (callback) {
// L-BFGS does not support learning rate -> ignore learning schedule
float sched = 0;
callback(callback_data, &sched);
}
ggml_vec_cpy_f32(nx, x, xp); ggml_vec_cpy_f32(nx, x, xp);
ggml_vec_mad_f32(nx, x, d, *step); ggml_vec_mad_f32(nx, x, d, *step);
@ -18677,7 +18666,7 @@ static enum ggml_opt_result linesearch_backtracking(
ggml_graph_reset (gf); ggml_graph_reset (gf);
ggml_set_f32 (f->grad, 1.0f); ggml_set_f32 (f->grad, 1.0f);
ggml_graph_compute_with_ctx(ctx, gb, params->n_threads); ggml_graph_compute(gb, cplan);
ggml_opt_get_grad(np, ps, g); ggml_opt_get_grad(np, ps, g);
@ -18737,7 +18726,9 @@ static enum ggml_opt_result ggml_opt_lbfgs(
struct ggml_opt_params params, struct ggml_opt_params params,
struct ggml_tensor * f, struct ggml_tensor * f,
struct ggml_cgraph * gf, struct ggml_cgraph * gf,
struct ggml_cgraph * gb) { struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data) {
if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE || if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) { params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) { if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
@ -18769,6 +18760,10 @@ static enum ggml_opt_result ggml_opt_lbfgs(
opt->iter = iter; opt->iter = iter;
} }
struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
float * x = opt->lbfgs.x->data; // current parameters float * x = opt->lbfgs.x->data; // current parameters
float * xp = opt->lbfgs.xp->data; // previous parameters float * xp = opt->lbfgs.xp->data; // previous parameters
float * g = opt->lbfgs.g->data; // current gradient float * g = opt->lbfgs.g->data; // current gradient
@ -18790,6 +18785,12 @@ static enum ggml_opt_result ggml_opt_lbfgs(
float * lm_s = opt->lbfgs.lms->data; float * lm_s = opt->lbfgs.lms->data;
float * lm_y = opt->lbfgs.lmy->data; float * lm_y = opt->lbfgs.lmy->data;
if (callback) {
// L-BFGS does not support learning rate -> ignore learning schedule
float sched = 0;
callback(callback_data, &sched);
}
// evaluate the function value and its gradient // evaluate the function value and its gradient
{ {
ggml_opt_set_params(np, ps, x); ggml_opt_set_params(np, ps, x);
@ -18797,11 +18798,14 @@ static enum ggml_opt_result ggml_opt_lbfgs(
ggml_graph_reset (gf); ggml_graph_reset (gf);
ggml_set_f32 (f->grad, 1.0f); ggml_set_f32 (f->grad, 1.0f);
ggml_graph_compute_with_ctx(ctx, gb, params.n_threads); ggml_graph_compute(gb, &cplan);
ggml_opt_get_grad(np, ps, g); ggml_opt_get_grad(np, ps, g);
fx = ggml_get_f32_1d(f, 0); fx = ggml_get_f32_1d(f, 0);
opt->loss_before = fx;
opt->loss_after = fx;
} }
// search direction = -gradient // search direction = -gradient
@ -18856,7 +18860,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
ggml_vec_cpy_f32(nx, xp, x); ggml_vec_cpy_f32(nx, xp, x);
ggml_vec_cpy_f32(nx, gp, g); ggml_vec_cpy_f32(nx, gp, g);
ls = linesearch_backtracking(ctx, &params, nx, x, &fx, g, d, step, xp, f, gf, gb, np, ps); ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gf, gb, &cplan, np, ps, callback, callback_data);
if (ls < 0) { if (ls < 0) {
// linesearch failed - go back to the previous point and return // linesearch failed - go back to the previous point and return
@ -18866,6 +18870,8 @@ static enum ggml_opt_result ggml_opt_lbfgs(
return ls; return ls;
} }
opt->loss_after = fx;
ggml_vec_norm_f32(nx, &xnorm, x); ggml_vec_norm_f32(nx, &xnorm, x);
ggml_vec_norm_f32(nx, &gnorm, g); ggml_vec_norm_f32(nx, &gnorm, g);
@ -18923,7 +18929,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
// ys = y^t \cdot s -> 1 / \rho. // ys = y^t \cdot s -> 1 / \rho.
// yy = y^t \cdot y. // yy = y^t \cdot y.
// //
ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0] *nx]); ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]); ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);
lm_ys[end[0]] = ys; lm_ys[end[0]] = ys;
@ -18986,13 +18992,15 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
.adam = { .adam = {
.n_iter = 10000, .n_iter = 10000,
.sched = 1.000f, .sched = 1.000f,
.decay = 0.001f, .decay = 0.0f,
.decay_min_ndim = 2,
.alpha = 0.001f, .alpha = 0.001f,
.beta1 = 0.9f, .beta1 = 0.9f,
.beta2 = 0.999f, .beta2 = 0.999f,
.eps = 1e-8f, .eps = 1e-8f,
.eps_f = 1e-5f, .eps_f = 1e-5f,
.eps_g = 1e-3f, .eps_g = 1e-3f,
.gclip = 0.0f,
}, },
}; };
} break; } break;
@ -19042,23 +19050,13 @@ GGML_API void ggml_opt_init(
switch (opt->params.type) { switch (opt->params.type) {
case GGML_OPT_ADAM: case GGML_OPT_ADAM:
{ {
opt->adam.x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
opt->adam.g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
opt->adam.g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
opt->adam.m = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx); opt->adam.m = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
opt->adam.v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx); opt->adam.v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
opt->adam.mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
opt->adam.vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
opt->adam.pf = params.past > 0 opt->adam.pf = params.past > 0
? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past) ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
: NULL; : NULL;
ggml_set_zero(opt->adam.x);
ggml_set_zero(opt->adam.g1);
ggml_set_zero(opt->adam.g2);
ggml_set_zero(opt->adam.m); ggml_set_zero(opt->adam.m);
ggml_set_zero(opt->adam.v); ggml_set_zero(opt->adam.v);
ggml_set_zero(opt->adam.mh);
ggml_set_zero(opt->adam.vh);
if (opt->adam.pf) { if (opt->adam.pf) {
ggml_set_zero(opt->adam.pf); ggml_set_zero(opt->adam.pf);
} }
@ -19142,7 +19140,7 @@ enum ggml_opt_result ggml_opt_resume(
*gf = ggml_build_forward (f); *gf = ggml_build_forward (f);
*gb = ggml_build_backward(ctx, gf, true); *gb = ggml_build_backward(ctx, gf, true);
return ggml_opt_resume_g(ctx, opt, f, gf, gb); return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
} }
enum ggml_opt_result ggml_opt_resume_g( enum ggml_opt_result ggml_opt_resume_g(
@ -19150,7 +19148,9 @@ enum ggml_opt_result ggml_opt_resume_g(
struct ggml_opt_context * opt, struct ggml_opt_context * opt,
struct ggml_tensor * f, struct ggml_tensor * f,
struct ggml_cgraph * gf, struct ggml_cgraph * gf,
struct ggml_cgraph * gb) { struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data) {
// build forward + backward compute graphs // build forward + backward compute graphs
enum ggml_opt_result result = GGML_OPT_OK; enum ggml_opt_result result = GGML_OPT_OK;
@ -19158,11 +19158,11 @@ enum ggml_opt_result ggml_opt_resume_g(
switch (opt->params.type) { switch (opt->params.type) {
case GGML_OPT_ADAM: case GGML_OPT_ADAM:
{ {
result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb); result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
} break; } break;
case GGML_OPT_LBFGS: case GGML_OPT_LBFGS:
{ {
result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb); result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
} break; } break;
} }
@ -19617,7 +19617,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
// read the kv pairs // read the kv pairs
{ {
ctx->kv = GGML_ALIGNED_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv)); ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
struct gguf_kv * kv = &ctx->kv[i]; struct gguf_kv * kv = &ctx->kv[i];
@ -19700,7 +19700,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
// read the tensor infos // read the tensor infos
{ {
ctx->infos = GGML_ALIGNED_MALLOC(ctx->header.n_tensors * sizeof(struct gguf_tensor_info)); ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
struct gguf_tensor_info * info = &ctx->infos[i]; struct gguf_tensor_info * info = &ctx->infos[i];
@ -19901,7 +19901,7 @@ void gguf_free(struct gguf_context * ctx) {
} }
} }
GGML_ALIGNED_FREE(ctx->kv); free(ctx->kv);
} }
if (ctx->infos) { if (ctx->infos) {
@ -19913,7 +19913,7 @@ void gguf_free(struct gguf_context * ctx) {
} }
} }
GGML_ALIGNED_FREE(ctx->infos); free(ctx->infos);
} }
GGML_ALIGNED_FREE(ctx); GGML_ALIGNED_FREE(ctx);

47
ggml.h
View file

@ -130,13 +130,16 @@
// The data of the tensor is accessed via the "data" pointer. For example: // The data of the tensor is accessed via the "data" pointer. For example:
// //
// { // {
// struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3); // const int nx = 2;
// const int ny = 3;
// //
// // a[2, 1] = 1.0f; // struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, ny);
// *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f;
// //
// // a[0, 2] = 2.0f; // for (int y = 0; y < ny; y++) {
// *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f; // for (int x = 0; x < nx; x++) {
// *(float *) ((char *) a->data + y*a->nb[1] + x*a->nb[0]) = x + y;
// }
// }
// //
// ... // ...
// } // }
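The rewritten example writes elements through the byte strides nb[]; reading them back uses the same arithmetic. A tiny complementary accessor (illustrative, not part of the ggml API):

    static inline float tensor_get_f32_2d(const struct ggml_tensor * a, int x, int y) {
        // nb[0] is the byte stride between columns and nb[1] between rows, so this
        // also works for permuted / non-contiguous f32 views
        return *(const float *) ((const char *) a->data + y*a->nb[1] + x*a->nb[0]);
    }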
@ -211,6 +214,11 @@
#define GGML_MAX_OP_PARAMS 32 #define GGML_MAX_OP_PARAMS 32
#define GGML_DEFAULT_N_THREADS 4 #define GGML_DEFAULT_N_THREADS 4
#if UINTPTR_MAX == 0xFFFFFFFF
#define GGML_MEM_ALIGN 4
#else
#define GGML_MEM_ALIGN 16
#endif
#define GGML_EXIT_SUCCESS 0 #define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1 #define GGML_EXIT_ABORTED 1
@ -944,11 +952,11 @@ extern "C" {
// a - x // a - x
// b - dy // b - dy
// TODO: update with configurable eps
GGML_API struct ggml_tensor * ggml_rms_norm_back( GGML_API struct ggml_tensor * ggml_rms_norm_back(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_tensor * a, struct ggml_tensor * a,
struct ggml_tensor * b); struct ggml_tensor * b,
float eps);
// A: n columns, m rows // A: n columns, m rows
// B: n columns, p rows (i.e. we transpose it internally) // B: n columns, p rows (i.e. we transpose it internally)
@ -1604,7 +1612,8 @@ extern "C" {
struct ggml_tensor * tensor); struct ggml_tensor * tensor);
GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep);
GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
@ -1669,6 +1678,8 @@ extern "C" {
GGML_LINESEARCH_INVALID_PARAMETERS, GGML_LINESEARCH_INVALID_PARAMETERS,
}; };
typedef void (*ggml_opt_callback)(void * data, float * sched);
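A hypothetical callback matching this typedef, e.g. a linear warm-up of the Adam schedule multiplier (warmup_state, warmup_cb and n_warmup are illustrative names, not part of ggml):

    struct warmup_state { int step; int n_warmup; };

    static void warmup_cb(void * data, float * sched) {
        struct warmup_state * s = (struct warmup_state *) data;
        *sched = s->step < s->n_warmup ? (float) s->step / (float) s->n_warmup : 1.0f;
        s->step++; // called once per optimizer iteration; L-BFGS passes sched = 0 and ignores the result
    }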
// optimization parameters // optimization parameters
// //
// see ggml.c (ggml_opt_default_params) for default values // see ggml.c (ggml_opt_default_params) for default values
@ -1704,12 +1715,14 @@ extern "C" {
float sched; // schedule multiplier (fixed, decay or warmup) float sched; // schedule multiplier (fixed, decay or warmup)
float decay; // weight decay for AdamW, use 0.0f to disable float decay; // weight decay for AdamW, use 0.0f to disable
int decay_min_ndim; // minimum number of tensor dimension to apply weight decay
float alpha; // learning rate float alpha; // learning rate
float beta1; float beta1;
float beta2; float beta2;
float eps; // epsilon for numerical stability float eps; // epsilon for numerical stability
float eps_f; // epsilon for convergence test float eps_f; // epsilon for convergence test
float eps_g; // epsilon for convergence test float eps_g; // epsilon for convergence test
float gclip; // gradient clipping
} adam; } adam;
// LBFGS parameters // LBFGS parameters
@ -1737,14 +1750,12 @@ extern "C" {
bool just_initialized; bool just_initialized;
float loss_before;
float loss_after;
struct { struct {
struct ggml_tensor * x; // view of the parameters
struct ggml_tensor * g1; // gradient
struct ggml_tensor * g2; // gradient squared
struct ggml_tensor * m; // first moment struct ggml_tensor * m; // first moment
struct ggml_tensor * v; // second moment struct ggml_tensor * v; // second moment
struct ggml_tensor * mh; // first moment hat
struct ggml_tensor * vh; // second moment hat
struct ggml_tensor * pf; // past function values struct ggml_tensor * pf; // past function values
float fx_best; float fx_best;
float fx_prev; float fx_prev;
@ -1781,10 +1792,10 @@ extern "C" {
// initialize optimizer context // initialize optimizer context
GGML_API void ggml_opt_init( GGML_API void ggml_opt_init(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_opt_context * opt, struct ggml_opt_context * opt,
struct ggml_opt_params params, struct ggml_opt_params params,
int64_t nx); int64_t nx);
// continue optimizing the function defined by the tensor f // continue optimizing the function defined by the tensor f
GGML_API enum ggml_opt_result ggml_opt_resume( GGML_API enum ggml_opt_result ggml_opt_resume(
@ -1798,7 +1809,9 @@ extern "C" {
struct ggml_opt_context * opt, struct ggml_opt_context * opt,
struct ggml_tensor * f, struct ggml_tensor * f,
struct ggml_cgraph * gf, struct ggml_cgraph * gf,
struct ggml_cgraph * gb); struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data);
// //
// quantization // quantization

View file

@ -2694,13 +2694,13 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
__m256i p16l = _mm256_maddubs_epi16(q4l, q8l); __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
p16l = _mm256_madd_epi16(scale_l, p16l); p16l = _mm256_madd_epi16(scale_l, p16l);
sumi = _mm256_add_epi32(sumi, p16l);
const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
__m256i p16h = _mm256_maddubs_epi16(q4h, q8h); __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
p16h = _mm256_madd_epi16(scale_h, p16h); p16h = _mm256_madd_epi16(scale_h, p16h);
sumi = _mm256_add_epi32(sumi, p16h); const __m256i sumj = _mm256_add_epi32(p16l, p16h);
sumi = _mm256_add_epi32(sumi, sumj);
} }
__m256 vd = _mm256_set1_ps(d); __m256 vd = _mm256_set1_ps(d);

View file

@ -6247,6 +6247,34 @@ const char * llama_print_system_info(void) {
return s.c_str(); return s.c_str();
} }
void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
fprintf(stream, "\n");
fprintf(stream, "###########\n");
fprintf(stream, "# Timings #\n");
fprintf(stream, "###########\n");
fprintf(stream, "\n");
fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
1.0e-3 * ctx->t_eval_us / ctx->n_eval);
fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
fprintf(stream, "mst_sample: %.2f # ms / token during sampling\n",
1.0e-3 * ctx->t_sample_us / ctx->n_sample);
fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
fprintf(stream, "n_sample: %d # number of sampled tokens\n", ctx->n_sample);
fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
fprintf(stream, "t_sample_us: %" PRId64 " # total microseconds spent sampling\n", ctx->t_sample_us);
fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
1.0e6 * ctx->n_eval / ctx->t_eval_us);
fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
fprintf(stream, "ts_sample: %.2f # tokens / second during sampling\n",
1.0e6 * ctx->n_sample / ctx->t_sample_us);
}
// For internal test use // For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) { const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
return ctx->model.tensors_by_name; return ctx->model.tensors_by_name;
@ -6257,10 +6285,6 @@ void llama_log_set(llama_log_callback log_callback, void * user_data) {
g_state.log_callback_user_data = user_data; g_state.log_callback_user_data = user_data;
} }
#if defined(_MSC_VER) && !defined(vsnprintf)
#define vsnprintf _vsnprintf
#endif
static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) { static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
va_list args_copy; va_list args_copy;
va_copy(args_copy, args); va_copy(args_copy, args);

View file

@ -10,6 +10,7 @@
#endif // GGML_USE_CUBLAS #endif // GGML_USE_CUBLAS
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include <stdio.h>
#include <stdbool.h> #include <stdbool.h>
#ifdef LLAMA_SHARED #ifdef LLAMA_SHARED
@ -496,7 +497,7 @@ extern "C" {
// Type of pointer to the beam_search_callback function. // Type of pointer to the beam_search_callback function.
// void* callback_data is any custom data passed to llama_beam_search, that is subsequently // void* callback_data is any custom data passed to llama_beam_search, that is subsequently
// passed back to beam_search_callback. This avoids having to use global variables in the callback. // passed back to beam_search_callback. This avoids having to use global variables in the callback.
typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, llama_beams_state); typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, struct llama_beams_state);
/// @details Deterministically returns entire sentence constructed by a beam search. /// @details Deterministically returns entire sentence constructed by a beam search.
/// @param ctx Pointer to the llama_context. /// @param ctx Pointer to the llama_context.
@ -520,6 +521,8 @@ extern "C" {
// If this is not called, or NULL is supplied, everything is output on stderr. // If this is not called, or NULL is supplied, everything is output on stderr.
LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

140
run_with_preset.py Executable file
View file

@ -0,0 +1,140 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
import yaml
CLI_ARGS_MAIN_PERPLEXITY = [
"batch-size", "cfg-negative-prompt", "cfg-scale", "chunks", "color", "ctx-size", "escape",
"export", "file", "frequency-penalty", "grammar", "grammar-file", "hellaswag",
"hellaswag-tasks", "ignore-eos", "in-prefix", "in-prefix-bos", "in-suffix", "instruct",
"interactive", "interactive-first", "keep", "logdir", "logit-bias", "lora", "lora-base",
"low-vram", "main-gpu", "memory-f32", "mirostat", "mirostat-ent", "mirostat-lr", "mlock",
"model", "mtest", "multiline-input", "n-gpu-layers", "n-predict", "no-mmap", "no-mul-mat-q",
"np-penalize-nl", "numa", "ppl-output-type", "ppl-stride", "presence-penalty", "prompt",
"prompt-cache", "prompt-cache-all", "prompt-cache-ro", "random-prompt", "repeat-last-n",
"repeat-penalty", "reverse-prompt", "rope-freq-base", "rope-freq-scale", "rope-scale", "seed",
"simple-io", "tensor-split", "threads", "temp", "tfs", "top-k", "top-p", "typical",
"verbose-prompt"
]
CLI_ARGS_LLAMA_BENCH = [
"batch-size", "memory-f32", "low-vram", "model", "mul-mat-q", "n-gen", "n-gpu-layers",
"n-prompt", "output", "repetitions", "tensor-split", "threads", "verbose"
]
CLI_ARGS_SERVER = [
"alias", "batch-size", "ctx-size", "embedding", "host", "memory-f32", "lora", "lora-base",
"low-vram", "main-gpu", "mlock", "model", "n-gpu-layers", "n-probs", "no-mmap", "no-mul-mat-q",
"numa", "path", "port", "rope-freq-base", "timeout", "rope-freq-scale", "tensor-split",
"threads", "verbose"
]
description = """Run llama.cpp binaries with presets from YAML file(s).
To choose which binary should be run, set the "binary" property (main, perplexity, llama-bench, and server are supported).
To get a preset file template, run a llama.cpp binary with the "--logdir" CLI argument.
Formatting considerations:
- The YAML property names are the same as the CLI argument names of the corresponding binary.
- Properties must use the long name of their corresponding llama.cpp CLI arguments.
- Like the llama.cpp binaries, the property names do not differentiate between hyphens and underscores.
- Flags must be defined as "<PROPERTY_NAME>: true" to be effective.
- To define the logit_bias property, the expected format is "<TOKEN_ID>: <BIAS>" in the "logit_bias" namespace.
- To define multiple "reverse_prompt" properties simultaneously, the expected format is a list of strings.
- To define a tensor split, pass a list of floats.
"""
usage = "run_with_preset.py [-h] [yaml_files ...] [--<ARG_NAME> <ARG_VALUE> ...]"
epilog = (" --<ARG_NAME> specify additional CLI ars to be passed to the binary (override all preset files). "
"Unknown args will be ignored.")
parser = argparse.ArgumentParser(
description=description, usage=usage, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-bin", "--binary", help="The binary to run.")
parser.add_argument("yaml_files", nargs="*",
help="Arbitrary number of YAML files from which to read preset values. "
"If two files specify the same values the later one will be used.")
known_args, unknown_args = parser.parse_known_args()
if not known_args.yaml_files and not unknown_args:
parser.print_help()
sys.exit(0)
props = dict()
for yaml_file in known_args.yaml_files:
with open(yaml_file, "r") as f:
props.update(yaml.load(f, yaml.SafeLoader))
props = {prop.replace("_", "-"): val for prop, val in props.items()}
binary = props.pop("binary", "main")
if known_args.binary:
binary = known_args.binary
if os.path.exists(f"./{binary}"):
binary = f"./{binary}"
if binary.lower().endswith("main") or binary.lower().endswith("perplexity"):
cli_args = CLI_ARGS_MAIN_PERPLEXITY
elif binary.lower().endswith("llama-bench"):
cli_args = CLI_ARGS_LLAMA_BENCH
elif binary.lower().endswith("server"):
cli_args = CLI_ARGS_SERVER
else:
print(f"Unknown binary: {binary}")
sys.exit(1)
command_list = [binary]
for cli_arg in cli_args:
value = props.pop(cli_arg, None)
if not value or value == -1:
continue
if cli_arg == "logit-bias":
for token, bias in value.items():
command_list.append("--logit-bias")
command_list.append(f"{token}{bias:+}")
continue
if cli_arg == "reverse-prompt" and not isinstance(value, str):
for rp in value:
command_list.append("--reverse-prompt")
command_list.append(str(rp))
continue
command_list.append(f"--{cli_arg}")
if cli_arg == "tensor-split":
command_list.append(",".join([str(v) for v in value]))
continue
value = str(value)
if value != "True":
command_list.append(str(value))
num_unused = len(props)
if num_unused > 10:
print(f"The preset file contained a total of {num_unused} unused properties.")
elif num_unused > 0:
print("The preset file contained the following unused properties:")
for prop, value in props.items():
print(f" {prop}: {value}")
command_list += unknown_args
sp = subprocess.Popen(command_list)
while sp.returncode is None:
try:
sp.wait()
except KeyboardInterrupt:
pass
sys.exit(sp.returncode)

View file

@ -20,6 +20,7 @@ fi
model="$1" model="$1"
out="../tmp/results-${model}" out="../tmp/results-${model}"
set -o pipefail
set -e set -e
mkdir -p ${out} mkdir -p ${out}

View file

@ -20,6 +20,7 @@ fi
model="$1" model="$1"
out="../tmp/results-${model}" out="../tmp/results-${model}"
set -o pipefail
set -e set -e
mkdir -p ${out} mkdir -p ${out}

View file

@ -17,6 +17,7 @@ if [ ! -z "$3" ]; then
args="$3" args="$3"
fi fi
set -o pipefail
set -e set -e
model="$1" model="$1"

View file

@ -275,14 +275,14 @@ static bool check_gradient(
ggml_graph_compute_with_ctx(ctx0, &gf, n_threads); ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
const float f0 = ggml_get_f32_1d(f, 0); const double f0 = ggml_get_f32_1d(f, 0);
ggml_set_f32_1d(x[i], k, xm); ggml_set_f32_1d(x[i], k, xm);
ggml_graph_compute_with_ctx(ctx0, &gf, n_threads); ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
const float f1 = ggml_get_f32_1d(f, 0); const double f1 = ggml_get_f32_1d(f, 0);
const float g0 = (f0 - f1)/(2.0f*eps); const double g0 = (f0 - f1)/(2.0*(double) eps);
ggml_set_f32_1d(x[i], k, x0); ggml_set_f32_1d(x[i], k, x0);
@ -292,10 +292,10 @@ static bool check_gradient(
ggml_graph_compute_with_ctx(ctx0, &gb, n_threads); ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
const float g1 = ggml_get_f32_1d(x[i]->grad, k); const double g1 = ggml_get_f32_1d(x[i]->grad, k);
const float error_abs = fabsf(g0 - g1); const double error_abs = fabs(g0 - g1);
const float error_rel = g0 != 0 ? fabsf(g0 - g1)/fabsf(g0) : 0; const double error_rel = g0 != 0 ? fabs(g0 - g1)/fabs(g0) : 0;
if (error_abs > max_error_abs || error_rel > max_error_rel) { if (error_abs > max_error_abs || error_rel > max_error_rel) {
printf("%s: ndims=%d, i=%d, k=%d, x0=%f, xm=%f, xp=%f, f0=%f, f1=%f, g0=%f, g1=%f, eps=%f, error_abs=%f, error_rel=%f\n", printf("%s: ndims=%d, i=%d, k=%d, x0=%f, xm=%f, xp=%f, f0=%f, f1=%f, g0=%f, g1=%f, eps=%f, error_abs=%f, error_rel=%f\n",
@ -531,7 +531,7 @@ int main(int argc, const char ** argv) {
struct ggml_tensor * f = ggml_sum(ctx0, ggml_sqrt(ctx0, x[0])); struct ggml_tensor * f = ggml_sum(ctx0, ggml_sqrt(ctx0, x[0]));
check_gradient("sqrt", ctx0, x, f, ndims, nargs, 1e-3f, INFINITY, 1e-1f); check_gradient("sqrt", ctx0, x, f, ndims, nargs, 1e-3f, 2e-2f, 1e-1f);
} }
} }
@ -1345,9 +1345,18 @@ int main(int argc, const char ** argv) {
x[0] = get_random_tensor_f32(ctx0, ndims, ne2, -1.0f, 1.0f); x[0] = get_random_tensor_f32(ctx0, ndims, ne2, -1.0f, 1.0f);
ggml_set_param(ctx0, x[0]); ggml_set_param(ctx0, x[0]);
struct ggml_tensor * f = ggml_sum(ctx0, ggml_soft_max(ctx0, x[0])); float eps = 1e-6f;
// don't use only sum as aggregation, because sum of softmax is always 1 -> finite differences should not work
// instead use sum(log(soft_max()*(1-eps)+eps)); use eps to avoid log(0)
struct ggml_tensor * f = ggml_sum(ctx0,
ggml_log(ctx0,
ggml_add1(ctx0,
ggml_scale(ctx0,
ggml_soft_max(ctx0, x[0]),
ggml_new_f32(ctx0, 1.0f - eps)),
ggml_new_f32(ctx0, eps))));
check_gradient("softmax", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY); check_gradient("softmax", ctx0, x, f, ndims, nargs, 1e-3f, 2e-1f, INFINITY);
} }
} }
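Why the wrapper is needed (restating the comment above): $\sum_i \mathrm{softmax}(x)_i \equiv 1$ for every $x$, so the old objective $f(x) = \sum_i \mathrm{softmax}(x)_i$ is constant, its true gradient is exactly zero, and the finite-difference reference in check_gradient is nothing but rounding noise. Summing $\log\big(\mathrm{softmax}(x)\,(1-\varepsilon) + \varepsilon\big)$ instead gives a non-constant scalar whose numerical gradient can be compared meaningfully against the analytic one.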
@ -1358,15 +1367,26 @@ int main(int argc, const char ** argv) {
int64_t ne2[4]; int64_t ne2[4];
get_random_dims(ne2, 4); get_random_dims(ne2, 4);
for (int ndims = 1; ndims <= 3; ++ndims) { for (int ndims = 1; ndims <= 4; ++ndims) {
x[0] = get_random_tensor_f32(ctx0, ndims, ne2, -1.0f, 1.0f); x[0] = get_random_tensor_f32(ctx0, ndims, ne2, -0.1f, 0.1f);
x[1] = get_random_tensor_f32(ctx0, ndims, ne2, 0.0f, 1.0f); x[1] = get_random_tensor_f32(ctx0, ndims, ne2, 0.0f, 1.0f);
// the second argument to cross_entropy_loss must sum up to 1 for each row
int nr = ggml_nrows(x[1]);
int nc = ggml_nelements(x[1]) / nr;
for (int ir = 0; ir < nr; ++ir) {
float sum = 0;
for (int ic = 0; ic < nc; ++ic) {
sum += ((float *) x[1]->data)[ic + ir*nc];
}
for (int ic = 0; ic < nc; ++ic) {
((float *) x[1]->data)[ic + ir*nc] /= sum;
}
}
ggml_set_param(ctx0, x[0]); ggml_set_param(ctx0, x[0]);
struct ggml_tensor * f = ggml_sum(ctx0, ggml_cross_entropy_loss(ctx0, x[0], x[1])); struct ggml_tensor * f = ggml_cross_entropy_loss(ctx0, x[0], x[1]);
check_gradient("cross_entropy_loss", ctx0, x, f, ndims, nargs, 1e-1f, 1e-2f, INFINITY); check_gradient("cross_entropy_loss", ctx0, x, f, ndims, nargs, 1e-4f, 1e-3f, INFINITY);
// finite differences regularly fails!
} }
} }
@ -1473,7 +1493,7 @@ int main(int argc, const char ** argv) {
struct ggml_tensor * f = ggml_sum(ctx0, ggml_flash_attn(ctx0, x[0], x[1], x[2], (masked == 0))); struct ggml_tensor * f = ggml_sum(ctx0, ggml_flash_attn(ctx0, x[0], x[1], x[2], (masked == 0)));
check_gradient("flash_attn f32", ctx0, x, f, ndims, nargs, 1.5e-4f, INFINITY, 3.5f); check_gradient("flash_attn f32", ctx0, x, f, ndims, nargs, 1.5e-4f, 1e-3f, INFINITY);
} }
} }
} }
@ -1514,7 +1534,7 @@ int main(int argc, const char ** argv) {
struct ggml_tensor * f = ggml_sum(ctx0, ggml_flash_attn(ctx0, x[0], x[1], x[2], (masked == 0))); struct ggml_tensor * f = ggml_sum(ctx0, ggml_flash_attn(ctx0, x[0], x[1], x[2], (masked == 0)));
check_gradient("flash_attn f16", ctx0, x, f, ndims, nargs, 1.5e-4f, INFINITY, 3.5f); check_gradient("flash_attn f16", ctx0, x, f, ndims, nargs, 1.5e-4f, 1e-3f, INFINITY);
} }
} }
} }