diff --git a/.editorconfig b/.editorconfig
index 16d16b3b5..bd525e13f 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -26,3 +26,6 @@ indent_size = 2
[examples/llama.swiftui/llama.swiftui.xcodeproj/*]
indent_style = tab
+
+[examples/cvector-generator/*.txt]
+insert_final_newline = unset
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index e6d032d87..997c6d9d0 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,5 +1,7 @@
-- Self Reported Review Complexity:
- - [ ] Review Complexity : Low
- - [ ] Review Complexity : Medium
- - [ ] Review Complexity : High
-- [ ] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md)
+
+
+- [x] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md)
+- Self-reported review complexity:
+ - [ ] Low
+ - [ ] Medium
+ - [ ] High
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 08481334f..c90414afa 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -119,6 +119,7 @@ option(LLAMA_HIP_UMA "llama: use HIP unified memory arch
option(LLAMA_VULKAN "llama: use Vulkan" OFF)
option(LLAMA_VULKAN_CHECK_RESULTS "llama: run Vulkan op checks" OFF)
option(LLAMA_VULKAN_DEBUG "llama: enable Vulkan debug output" OFF)
+option(LLAMA_VULKAN_MEMORY_DEBUG "llama: enable Vulkan memory debug output" OFF)
option(LLAMA_VULKAN_VALIDATE "llama: enable Vulkan validation" OFF)
option(LLAMA_VULKAN_RUN_TESTS "llama: run Vulkan tests" OFF)
option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT})
@@ -534,6 +535,10 @@ if (LLAMA_VULKAN)
add_compile_definitions(GGML_VULKAN_DEBUG)
endif()
+ if (LLAMA_VULKAN_MEMORY_DEBUG)
+ add_compile_definitions(GGML_VULKAN_MEMORY_DEBUG)
+ endif()
+
if (LLAMA_VULKAN_VALIDATE)
add_compile_definitions(GGML_VULKAN_VALIDATE)
endif()
@@ -684,7 +689,8 @@ if (LLAMA_SYCL)
endif()
set(GGML_HEADERS_SYCL ggml-sycl.h)
- set(GGML_SOURCES_SYCL ggml-sycl.cpp)
+ file(GLOB GGML_SOURCES_SYCL "ggml-sycl/*.cpp")
+ list(APPEND GGML_SOURCES_SYCL "ggml-sycl.cpp")
if (WIN32)
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl sycl7 OpenCL mkl_sycl_blas_dll.lib mkl_intel_ilp64_dll.lib mkl_sequential_dll.lib mkl_core_dll.lib)
diff --git a/Makefile b/Makefile
index 744fe5739..f94ee3933 100644
--- a/Makefile
+++ b/Makefile
@@ -38,6 +38,7 @@ BUILD_TARGETS = \
llama-tokenize \
llama-train-text-from-scratch \
llama-vdot \
+ llama-cvector-generator \
tests/test-c.o
# Binaries only useful for tests
@@ -607,6 +608,10 @@ ifdef LLAMA_VULKAN_DEBUG
MK_CPPFLAGS += -DGGML_VULKAN_DEBUG
endif
+ifdef LLAMA_VULKAN_MEMORY_DEBUG
+ MK_CPPFLAGS += -DGGML_VULKAN_MEMORY_DEBUG
+endif
+
ifdef LLAMA_VULKAN_VALIDATE
MK_CPPFLAGS += -DGGML_VULKAN_VALIDATE
endif
@@ -922,6 +927,10 @@ llama-eval-callback: examples/eval-callback/eval-callback.cpp ggml.o llama.o $(C
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+ $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+ $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
llama-train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
diff --git a/README-sycl.md b/README-sycl.md
index 93b623daf..bd1984706 100644
--- a/README-sycl.md
+++ b/README-sycl.md
@@ -1,6 +1,7 @@
# llama.cpp for SYCL
- [Background](#background)
+- [Recommended Release](#recommended-release)
- [News](#news)
- [OS](#os)
- [Hardware](#hardware)
@@ -31,8 +32,23 @@ When targeting **Intel CPU**, it is recommended to use llama.cpp for [Intel oneM
It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS, cuBLAS, etc..*. In beginning work, the oneAPI's [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) open-source migration tool (Commercial release [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) was used for this purpose.
+## Recommended Release
+
+The SYCL backend may be broken by some PRs, because there is no online CI for it.
+
+The following release has been verified to work well:
+
+|Commit ID|Tag|Release|Verified Platform|
+|-|-|-|-|
+|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1|
+
+
## News
+- 2024.5
+  - Performance improved: 34 -> 37 tokens/s for llama-2-7b.Q4_0 on Arc770.
+  - Arch Linux verified successfully.
+
- 2024.4
- Support data types: GGML_TYPE_IQ4_NL, GGML_TYPE_IQ4_XS, GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ3_S, GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M.
diff --git a/README.md b/README.md
index 6c24135d6..54859946d 100644
--- a/README.md
+++ b/README.md
@@ -195,6 +195,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT)
- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal)
- [pythops/tenere](https://github.com/pythops/tenere) (AGPL)
+- [RAGNA Desktop](https://ragna.app/) (proprietary)
- [RecurseChat](https://recurse.chat/) (proprietary)
- [semperai/amica](https://github.com/semperai/amica)
- [withcatai/catai](https://github.com/withcatai/catai)
@@ -386,6 +387,30 @@ brew install llama.cpp
```
The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
+### Nix
+
+On Mac and Linux, the Nix package manager can be used:
+```
+nix profile install nixpkgs#llama-cpp
+```
+for flake-enabled installs, or
+```
+nix-env --file '' --install --attr llama-cpp
+```
+for non-flake-enabled installs.
+
+This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).
+
+#### Flox
+
+On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via
+```
+flox install llama-cpp
+```
+Flox follows the nixpkgs build of llama.cpp.
+
### Metal Build
On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
diff --git a/common/common.cpp b/common/common.cpp
index 1591790e6..73ff0e85b 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1576,6 +1576,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
return true;
}
params.out_file = argv[i];
+ params.cvector_outfile = argv[i];
return true;
}
if (arg == "-ofreq" || arg == "--output-frequency") {
@@ -1610,6 +1611,55 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.i_chunk = std::stoi(argv[i]);
return true;
}
+ // cvector params
+ if (arg == "--completions-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.cvector_completions_file = argv[i];
+ return true;
+ }
+ if (arg == "--positive-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.cvector_positive_file = argv[i];
+ return true;
+ }
+ if (arg == "--negative-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.cvector_negative_file = argv[i];
+ return true;
+ }
+ if (arg == "--completions") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.n_completions = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--pca-batch") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.n_pca_batch = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--pca-iter") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.n_pca_iterations = std::stoi(argv[i]);
+ return true;
+ }
#ifndef LOG_DISABLE_LOGS
// Parse args for logging parameters
if (log_param_single_parse(argv[i])) {
@@ -1931,6 +1981,16 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "logging", " --log-append", "Don't truncate the old log file." });
#endif // LOG_DISABLE_LOGS
+ options.push_back({ "cvector" });
+ options.push_back({ "cvector", "-o, --output FNAME", "output file (default: '%s')", params.cvector_outfile.c_str() });
+ options.push_back({ "cvector", " --positive-file FNAME", "positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str() });
+ options.push_back({ "cvector", " --negative-file FNAME", "negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str() });
+ options.push_back({ "cvector", " --completions-file FNAME",
+ "completions file (default: '%s')", params.cvector_completions_file.c_str() });
+ options.push_back({ "cvector", " --completions N", "number of lines of completions file to use (default: %d)", params.n_completions });
+ options.push_back({ "cvector", " --batch-pca N", "batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch });
+ options.push_back({ "cvector", " --iter-pca N", "number of iterations used for PCA (default: %d)", params.n_pca_iterations });
+
printf("usage: %s [options]\n", argv[0]);
for (const auto & o : options) {
diff --git a/common/common.h b/common/common.h
index 2345d855e..58ed72f43 100644
--- a/common/common.h
+++ b/common/common.h
@@ -232,6 +232,15 @@ struct gpt_params {
bool process_output = false; // collect data for the output tensor
bool compute_ppl = true; // whether to compute perplexity
+
+ // cvector-generator params
+ int n_completions = 64;
+ int n_pca_batch = 20;
+ int n_pca_iterations = 1000;
+ std::string cvector_outfile = "control_vector.gguf";
+ std::string cvector_completions_file = "examples/cvector-generator/completions.txt";
+ std::string cvector_positive_file = "examples/cvector-generator/positive.txt";
+ std::string cvector_negative_file = "examples/cvector-generator/negative.txt";
};
void gpt_params_handle_model_default(gpt_params & params);
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index d6ce35f4c..0b51c44c0 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -12,6 +12,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
if (EMSCRIPTEN)
else()
+ add_subdirectory(cvector-generator)
add_subdirectory(baby-llama)
add_subdirectory(batched-bench)
add_subdirectory(batched)
diff --git a/examples/cvector-generator/CMakeLists.txt b/examples/cvector-generator/CMakeLists.txt
new file mode 100644
index 000000000..0a559d60c
--- /dev/null
+++ b/examples/cvector-generator/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET llama-cvector-generator)
+add_executable(${TARGET} cvector-generator.cpp pca.hpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/cvector-generator/README.md b/examples/cvector-generator/README.md
new file mode 100644
index 000000000..7b0e79c1f
--- /dev/null
+++ b/examples/cvector-generator/README.md
@@ -0,0 +1,34 @@
+# cvector-generator
+
+This example demonstrates how to generate a control vector using gguf models.
+
+Related PRs:
+- [Add support for control vectors](https://github.com/ggerganov/llama.cpp/pull/5970)
+- (Issue) [Generate control vector using llama.cpp](https://github.com/ggerganov/llama.cpp/issues/6880)
+- [Add cvector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514)
+
+## Examples
+
+```sh
+# CPU only
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf
+
+# With GPU
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99
+
+# With advanced options
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100
+
+# To see help message
+./cvector-generator -h
+# Then, have a look at "cvector" section
+```
+
+## Tips and tricks
+
+If you have multiple lines per prompt, escape the newline character by writing it as `\n`. For example:
+
+```
+<|im_start|>system\nAct like a person who is extremely happy.<|im_end|>
+<|im_start|>system\nYou are in a very good mood today<|im_end|>
+```
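+
+## Using the generated control vector
+
+The generated `control_vector.gguf` is applied at inference time rather than merged into the model. A minimal sketch, assuming the `llama-cli` binary and the `--control-vector-scaled FNAME SCALE` option from the control-vector support PR linked above:
+
+```sh
+# the scale sets the strength of the effect; a negative value flips the direction
+./llama-cli -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf \
+    --control-vector-scaled ./control_vector.gguf 0.8 \
+    -p "[INST] How are you feeling today? [/INST]"
+```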
diff --git a/examples/cvector-generator/completions.txt b/examples/cvector-generator/completions.txt
new file mode 100644
index 000000000..abc45ffd8
--- /dev/null
+++ b/examples/cvector-generator/completions.txt
@@ -0,0 +1,582 @@
+
+That game
+I can see
+Hmm, this
+I can relate to
+Who is
+I understand the
+Ugh,
+What the hell was
+Hey, did anyone
+Although
+Thank you for choosing
+What are you
+Oh w
+How dare you open
+It was my pleasure
+I'm hon
+I appreciate that you
+Are you k
+Whoever left this
+It's always
+Ew,
+Hey, I l
+Hello? Is someone
+I understand that
+That poem
+Aww, poor
+Hey, it
+Alright, who
+I didn't
+Well, life
+The document
+Oh no, this
+I'm concerned
+Hello, this is
+This art
+Hmm, this drink
+Hi there!
+It seems
+Is
+Good
+I can't
+Ex
+Who are
+I can see that
+Wow,
+Today is a
+Hey friend
+Sometimes friends
+Oh, this old
+The weather outside
+This place is sur
+I appreciate your input
+Thank you for the
+Look at
+I'm disappoint
+To my
+How dare you
+That's an
+This piece of art
+Eww
+This park is
+This is incredible
+Oh no, someone
+Exc
+Well, it'
+I warned
+Hey, I understand
+Hey, I saw
+How dare you go
+What the he
+Hey
+It's
+Hello? Hello?
+It
+Oh no!
+This is the perfect
+Good morning,
+Oh no, there
+It's so
+Yeah
+Uh,
+Hello everyone
+Who turned off
+The weather
+Who'
+Hey, this
+Wait,
+Eww, gross
+Excuse
+It seems like you
+Thank you so
+What happened?
+Oh my g
+I am deeply sad
+I war
+Okay, let'
+Hey, that
+That was a beautiful
+Oh no! That
+What happened
+Hey there
+The artist'
+What?!
+Hey, it'
+I am disappoint
+It seems like
+Oh no! The
+This park is a
+If you
+Yes! I did
+It sounds
+What
+Who is it
+Hmm, that
+That's strange
+Yeah, that was
+That's interesting
+This park
+What the hell
+Who is that
+I feel like my
+Oh well
+What the hell is
+Hello? Hello
+To my dearest
+Bless you!\"
+Thank you for
+Oh, looks like
+Can you please
+This place is
+Eww, what
+Bless you
+Is everything
+Hey, I just
+Whoever left these
+Well, that'
+I feel
+Hey, do you
+It's sad
+Oh no, it
+Hey, that'
+Oh my god,
+Thank you,
+Hello little one,
+I apolog
+Hey team, I
+How dare you read
+Who is this and
+Whoever left
+Hi there! W
+A
+If you have
+I was
+U
+Bless
+Well, this
+Oh, I'
+It's a
+Eww,
+Is everything okay?
+Oh, I
+Hello, can you
+Al
+That was a great
+What are
+I understand that not
+Oh no, not
+Who is it?\"
+Hey, can we
+Whoever is taking
+I would love to
+Hey, I noticed
+Hey, could
+I understand that there
+Hello?
+D
+Oh man, I
+Thank you so much
+Oh no, my
+Dear [Name
+Uh
+I remember
+Hey, who
+Well, it
+Are you
+I understand that it
+Hey, is
+I would
+Who is this
+Excuse me
+Alright
+I am thrilled
+Sometimes friends have
+Who the
+It's interesting
+I would love
+E
+Hello? Is anyone
+Well, this is
+This place
+Well,
+I warned you
+Hey, watch where
+Oh my
+That'
+Sometimes friends have different
+I understand that everyone
+What?
+What do these notes
+I can relate
+I'm not
+I understand
+To my dear
+Guys
+Well
+Hey, I appreciate
+Wow, what
+Dear
+That melody
+Who the hell
+Today is
+Hello little
+Wow, look
+That's great
+Love is never wrong
+I'm having
+Whoa, did
+Ugh
+Can you please provide
+I miss you,
+I feel uncom
+I know
+Ugh, this
+Hey, watch
+Oh great, a
+I didn
+Okay
+That game of char
+Oh
+I appreciate
+Who's there
+I am so
+Oh great, someone
+Hey, could you
+I remember wondering
+Wait, what?
+What do
+Hello? Can
+Hey there,
+That game of
+This is incred
+Oh my gosh
+Oh great, f
+I appreciate your
+It sounds like
+What the heck
+Okay, I understand
+Ew
+I understand that this
+Uh, hi
+Hi everyone!
+What the hell?
+Thank you for your
+Oh no, the
+Wow, I
+Who turned
+Dear [
+Whoever
+This is a
+Whoa, he
+What in the world
+Although the physical
+Hello, who is
+That's amaz
+Hey, I know
+Okay, that
+Hi everyone
+Hey, is everything
+I understand your fr
+Oh no, poor
+Oh, look
+Good morning
+Ew, gross
+Oh no, did
+Look at the family
+Hey team
+Yes!
+Hey, can I
+Okay, that'
+It's great
+Love is
+Hey, what
+Good morning, world
+Who is it?
+That poem really reson
+I
+That's
+I understand the task
+Gu
+Hello? Who'
+This postcard is
+Whoa,
+Oh, that
+I understand that I
+Whoever is
+Hello? Who is
+I'm really
+Wow, this
+Can
+This artwork really
+This is a shame
+I miss you too
+Who are you?
+Today is a difficult
+Hey, just
+Are you okay
+I am
+Hi,
+Wow, that
+Hey there! Can
+Okay, stay
+Oh great, just
+Yeah,
+Hello? Can you
+Oh, looks
+Thank you for sharing
+I'm glad
+Hey, is that
+Hmm
+It was my
+It sounds like you
+Wow, your
+I was promised certain
+That was such a
+Thank
+Excuse you
+That was
+Hey team,
+I feel un
+It was
+What'
+Hey friend, I
+How
+Saying goodbye
+That
+It's heart
+How dare
+Oh,
+Hello, may
+What's this
+Thank you for recogn
+Aww, that
+Oh, I remember
+Hmm, that'
+I miss
+I know this
+Wait
+Is everything okay
+Who is that person
+Wow, you
+Oh great
+I'm sad
+Wow, the
+I am very disappoint
+Who turned off the
+I understand that things
+I'm very
+Hi
+That's very
+Okay, I
+Oh no,
+Wow, there
+What's wrong
+I apologize for
+Hey, I
+Can I help you
+Oh, I didn
+Alright,
+Oh wow,
+Oh my goodness
+I know this event
+What in the
+Saying
+Yeah, that
+Guys, I
+Hey, this v
+This post
+Are
+Hey, can
+Hello? Is
+I can only imagine
+Oh, that sounds
+Hey, is anyone
+I am disappointed
+Hello,
+Hey everyone, I
+That was such
+It's okay
+The artist
+Whoa
+I understand that mistakes
+Can I help
+Who
+Hi everyone! I
+Hey, can you
+Wow, how
+Today
+Oh no, I
+Oh well, I
+Well, that
+This is the
+Yes! I finally
+Hey there little
+Hello everyone!
+Love is never
+Look at the
+This postcard
+Oh great,
+Can I
+Hmm, this is
+I understand your
+Oh, look at
+B
+I'm so
+Whoa, this
+W
+Oh, this
+Sometimes
+This piece of
+What the
+That was a
+Hey, do
+Oh no
+Whoa, what
+I feel like I
+The documentary
+Hello
+Hello little one
+I understand that my
+Eww, that
+Wow, an
+Yes! Finally,
+Although the physical location
+Whoever is watching
+That movie
+I remember wondering about
+Hey there, little
+Who's
+Hello, who
+Hello everyone! Thank
+Hello, can
+That's too
+Hey, just wanted
+Hey there, I
+Saying good
+Hey there!
+Who is there?
+Oh my good
+I am very
+Oh no, what
+Wow, thank
+I was promised
+Hi, is
+Hey, I'
+Guys, the
+Oh no, that
+Who is there
+Hello, this
+That movie really touched
+If you have something
+The documentary was
+I'm starting
+Are you kidd
+That movie really
+Hey everyone,
+Thank you for considering
+I didn'
+Yes! I
+Can you
+Oh my god
+Hey, whoever
+That melody really
+Thank you, little
+Hello, may I
+Look
+Wow, we
+It looks
+What do these
+Oh wow
+I apologize
+What are you all
+It's such
+It's clear
+Hey, I was
+Hey friend,
+I can only
+The weather outside is
+Eww, this
+I miss you
+Wow
+Aww,
+Hi, is there
+This artwork
+Okay,
+Oh well,
+This
+I'
+Say
+Hey there little gu
+Hmm,
+Whoa, who
+I am thr
+Oh man
+Okay, stay calm
+I'm happy
+Oh, this cur
+Oh man,
+I'm sorry
+Hello? Who
+What?! That
+This piece
+Hey everyone
+That's so
+Are you okay?
+What happened? Where
+Hi there
+The
+Who the hell entered
+I can
+Guys,
+What's
+What in
+It's important
+I'm
+I'm coming
+It'
+Yes! Finally
+Wait, what
+Wow, reading
+I'm surprised
+Hey, did
+Hey,
+Okay, let
+I understand that you
+Who the hell threw
+Eww, who
+Thank you for thinking
+Who is this?\"
+I am deeply
+Thank you for including
+Oh no, an
+It looks like you
+Aww
+I'm confused
+Wow, it
+That poem really
+Yes
+Hey there, is
+Hey, what'
+Thank you for remember
+To
+This is
+Thank you for making
+I can'
+That mel
+Wow, they
+I feel like
+Although the
+Who are you
+Love
+If
+What the hell are
+I am so sad
+Oh, I found
+Thank you
+It looks like
+Well, life is
+I appreciate that
+The artist's
+Whoa, that
+It's never
\ No newline at end of file
diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp
new file mode 100644
index 000000000..9941683db
--- /dev/null
+++ b/examples/cvector-generator/cvector-generator.cpp
@@ -0,0 +1,499 @@
+#include "common.h"
+#include "llama.h"
+#include "ggml.h"
+#include "pca.hpp"
+
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+
+//////////////////////////////////////////////////
+// utils
+
+template <typename Iter>
+static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
+ std::string ret;
+ for (; begin != end; ++begin) {
+ ret += llama_token_to_piece(ctx, *begin);
+ }
+
+ return ret;
+}
+
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
+
+ printf("\nexample usage:\n");
+ printf("\n CPU only: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf\n", argv[0]);
+ printf("\n with GPU: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99\n", argv[0]);
+    printf("\n    advanced: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100\n", argv[0]);
+ printf("\n");
+}
+
+//////////////////////////////////////////////////
+
+
+// cb_eval is reused for each pair of positive/negative prompts
+struct callback_data {
+ ggml_context * ctx_ggml = nullptr; // holds v_pos, v_neg, v_diff_filtered
+
+ int n_layers = 0;
+ int n_tokens = 0;
+ bool is_eval_pos = true;
+
+    // each element of the vector corresponds to one layer
+    std::vector<struct ggml_tensor *> v_pos; // vector of matrices of size [n_embd, n_tokens]
+    std::vector<struct ggml_tensor *> v_neg; // vector of matrices of size [n_embd, n_tokens]
+    std::vector<struct ggml_tensor *> v_diff_filtered; // vector of matrices of size [n_embd, n_nonzero_rows]. NOTE: n_nonzero_rows may be different for each layer
+
+ // save a tensor into either v_pos or v_neg (decided by is_eval_pos)
+ void save_tensor_for_layer(struct ggml_tensor * t) {
+ GGML_ASSERT(t->type == GGML_TYPE_F32);
+
+ if (ctx_ggml == nullptr) {
+ // alloc a new ctx_ggml if needed
+ struct ggml_init_params params_ggml = {
+ /*.mem_size =*/ ggml_tensor_overhead() * n_layers * 3u,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+ ctx_ggml = ggml_init(params_ggml);
+ }
+
+ // copy tensor data
+ auto n_bytes = ggml_nbytes(t);
+ struct ggml_tensor * t_layer = ggml_new_tensor_2d(ctx_ggml, t->type, t->ne[0], t->ne[1]);
+ t_layer->data = malloc(n_bytes); // TODO @ngxson : get rid of this malloc somehow
+ ggml_backend_tensor_get(t, t_layer->data, 0, n_bytes);
+ ggml_set_name(t_layer, ggml_get_name(t));
+ //print_debug_tensor(t_layer);
+
+ if (is_eval_pos) {
+ v_pos.push_back(t_layer);
+ } else {
+ v_neg.push_back(t_layer);
+ }
+ }
+
+ // calculate diff (v_pos - v_neg) and place the result back to v_pos
+ // all zero rows in the diff tensor will also be removed
+    // NOTE: the final layer is ignored; we only have (n_layers - 1) layers to process
+    std::vector<struct ggml_tensor *> calc_diff() {
+        for (size_t il = 0; il < v_pos.size(); il++) {
+ float * a = (float *) v_pos[il]->data;
+ float * b = (float *) v_neg[il]->data;
+ size_t n_elem = ggml_nelements(v_pos[il]);
+ for (size_t j = 0; j < n_elem; j++) {
+ a[j] -= b[j];
+ }
+ //print_debug_tensor(v_pos[i]);
+ auto diff_filtered = filter_nonzero_rows(v_pos[il]);
+ v_diff_filtered.push_back(diff_filtered);
+ }
+        return v_diff_filtered; // for convenience, we return the result as a std::vector
+ }
+
+ // delete zero rows from a given 2D tensor
+ struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
+ //printf("filter_nonzero_rows\n");
+ auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
+            // check if the given row contains only zero elements
+ int n_cols = t->ne[0]; // hint: should be equal to n_embd
+ for (int col = 0; col < n_cols; ++col) {
+                if (std::fabs(ggml_get_f32_nd(t, col, row, 0, 0)) > eps) {
+ return false;
+ }
+ }
+ return true;
+ };
+        std::vector<int> rows_to_copy; // the idx of non-zero rows (to be copied into diff_filtered)
+ for (int i_row = 0; i_row < a->ne[1]; i_row++) {
+ if (!is_row_all_zeros(a, i_row, 1e-6)) {
+ rows_to_copy.push_back(i_row);
+ }
+ }
+
+ // get "n_nonzero_rows" for the output "diff_filtered"
+ int n_nonzero_rows = rows_to_copy.size();
+ //printf("n_nonzero_rows: %d\n", n_nonzero_rows);
+ int n_embd = a->ne[0];
+ GGML_ASSERT(n_nonzero_rows > 0);
+
+ // diff_filtered: [n_embd, n_nonzero_rows]
+ struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
+ ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
+ ggml_format_name(diff_filtered, "diff_filtered_%s", a->name);
+ diff_filtered->data = malloc(ggml_nbytes(diff_filtered));
+
+ // copy non-zero rows
+ for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
+ int src_row = rows_to_copy[dest_row];
+ for (int i = 0; i < n_embd; i++) {
+ float src_elem = ggml_get_f32_nd(a, i, src_row, 0, 0);
+ ggml_set_f32_nd(diff_filtered, i, dest_row, 0, 0, src_elem);
+ }
+ }
+
+ //print_debug_tensor(diff_filtered);
+
+ return diff_filtered;
+ }
+
+ // we don't implement destructor, because we want to reuse callback_data. we just want to free the tensors
+ void reset() {
+ for (auto ptr : v_pos) free(ptr->data);
+ for (auto ptr : v_neg) free(ptr->data);
+ for (auto ptr : v_diff_filtered) free(ptr->data);
+ v_pos.clear();
+ v_neg.clear();
+ v_diff_filtered.clear();
+ if (ctx_ggml) {
+ ggml_free(ctx_ggml);
+ }
+ ctx_ggml = nullptr;
+ }
+};
+
+/**
+ * train_context holds the ggml context used for pre/post-processing the diff vectors.
+ * In short: input => v_diff and output => v_final.
+ */
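+// Overview of the data flow implemented below:
+//   1. cb_eval captures the per-layer hidden states of each positive/negative evaluation pass
+//   2. callback_data::calc_diff() subtracts them and drops all-zero rows
+//   3. train_context::concat_diff_tmp() accumulates the filtered diffs across all prompt pairs
+//   4. train_context::build_v_diff() converts the accumulated bytes into the transposed v_diff tensors
+//   5. PCA::run_pca() reduces each v_diff matrix to a single direction, stored in v_final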
+struct train_context {
+ ggml_context * ctx_ggml;
+ int n_embd;
+ int n_layers;
+
+ /* pair of prompts to be used for generating final vector */
+    std::vector<std::string> positive_entries;
+    std::vector<std::string> negative_entries;
+
+    // each element of the vector corresponds to one layer
+    // NOTE: the last layer is discarded; therefore, we will have (n_layers - 1) elements here
+    // NOTE (2): v_diff is transposed from v_diff_tmp
+    std::vector<struct ggml_tensor *> v_diff; // vector of matrices of size [m, n_embd] where m ~ n_tokens * n_completions (v_diff contains no zero-rows)
+    std::vector<struct ggml_tensor *> v_final; // vector of vectors of size [n_embd] to be written to file
+
+    // to make re-allocation easy when concatenating diffs, we temporarily store v_diff as a byte vector instead of a tensor
+    // v_diff_tmp will get converted into v_diff later on
+    std::vector<std::vector<uint8_t>> v_diff_tmp;
+
+ train_context(int n_embd_, int n_layers_) {
+ n_embd = n_embd_;
+ n_layers = n_layers_;
+ struct ggml_init_params params_ggml = {
+ /*.mem_size =*/ ggml_tensor_overhead() * (n_layers - 1) * 2u,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+ ctx_ggml = ggml_init(params_ggml);
+ for (int il = 0; il < n_layers - 1; il++) {
+            std::vector<uint8_t> empty;
+ v_diff_tmp.push_back(empty);
+ auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
+ t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
+ v_final.push_back(t);
+ }
+ }
+
+ // add new rows into existing tensor in v_diff_tmp
+    void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
+ GGML_ASSERT((int) diff_filtered.size() == n_layers - 1);
+ for (int il = 0; il < n_layers - 1; il++) {
+ auto t = diff_filtered[il];
+ auto & diff_tmp = v_diff_tmp[il];
+ size_t curr_size = diff_tmp.size();
+ diff_tmp.resize(curr_size + ggml_nbytes(t));
+ memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
+ }
+ }
+
+    // build the v_diff tensors from v_diff_tmp (v_diff needs to be transposed)
+ // TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
+ void build_v_diff() {
+ printf("build_v_diff\n");
+ for (int il = 0; il < n_layers - 1; il++) {
+ auto & diff_tmp = v_diff_tmp[il];
+ int n_elem = diff_tmp.size() / sizeof(float);
+ GGML_ASSERT(n_elem % n_embd == 0);
+ int n_rows = n_elem / n_embd;
+ struct ggml_tensor * diff = ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd);
+ ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
+ // copy data & transpose
+ diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
+ float * arr = (float *) diff_tmp.data();
+ for (int ir = 0; ir < n_rows; ++ir) {
+ for (int ic = 0; ic < n_embd; ++ic) {
+ float f = arr[ir*n_embd + ic];
+ ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
+ }
+ }
+ v_diff.push_back(diff);
+ print_debug_tensor(diff);
+ // free memory of diff_tmp
+ diff_tmp.resize(0);
+ }
+ }
+
+ ~train_context() {
+ for (auto ptr : v_final) free(ptr->data);
+ for (auto ptr : v_diff) free(ptr->data);
+ // no need to free v_diff_tmp, since we didn't use malloc
+ ggml_free(ctx_ggml);
+ }
+};
+
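+// tokenize one positive/negative prompt pair, padding the shorter sequence
+// (with a space token) to max_seq_len; equal lengths keep the per-layer
+// hidden-state matrices of the two passes the same shape, which
+// callback_data::calc_diff() relies on when subtracting them element-wise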
+struct tokenized_prompt {
+    std::vector<llama_token> tokens_pos;
+    std::vector<llama_token> tokens_neg;
+ size_t max_seq_len;
+
+ tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
+ const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+ tokens_pos = ::llama_tokenize(ctx, pos, add_bos);
+ tokens_neg = ::llama_tokenize(ctx, neg, add_bos);
+ max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
+ padding_seq(ctx, tokens_pos, max_seq_len);
+ padding_seq(ctx, tokens_neg, max_seq_len);
+ }
+
+    void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
+        // TODO: customize padding token
+        std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
+ llama_token pad_tok = pad_tokens.back();
+ while (tokens.size() < len) {
+ tokens.push_back(pad_tok);
+ }
+ }
+};
+
+//////////////////////////////////////////////////
+
+template <typename T>
+static std::string to_string(const T & val) {
+ std::stringstream ss;
+ ss << val;
+ return ss.str();
+}
+
+static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
+    std::vector<std::string> output;
+ std::ifstream file(path);
+ if (!file.is_open()) {
+ fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
+ exit(1);
+ }
+ std::string line;
+ while (std::getline(file, line)) {
+ bool is_skip = skip_empty_lines && line.empty();
+ if (!is_skip) {
+ string_process_escapes(line);
+ output.push_back(line);
+ }
+ }
+ file.close();
+ return output;
+}
+
+//////////////////////////////////////////////////
+
+static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
+ auto * cb_data = (callback_data *) user_data;
+ static const char * l_out_name = "l_out";
+ const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
+
+ if (ask) {
+ return is_l_out;
+ }
+
+ if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
+ return true;
+ }
+
+ // save the tensor to current context
+ cb_data->save_tensor_for_layer(t);
+ return true;
+}
+
+static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
+ llama_kv_cache_clear(ctx);
+ if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
+ fprintf(stderr, "%s : failed to eval\n", __func__);
+ return false;
+ }
+ return true;
+}
+
+static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
+ struct gguf_context * ctx = gguf_init_empty();
+
+ const std::string arch = "controlvector";
+ gguf_set_val_str(ctx, "general.architecture", arch.c_str());
+ gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
+ gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
+
+ for (size_t i = 0; i < v_ctrl.size(); ++i) {
+ gguf_add_tensor(ctx, v_ctrl[i]);
+ print_debug_tensor(v_ctrl[i]);
+ printf("Added tensor: %s\n", v_ctrl[i]->name);
+ }
+
+ printf("%s: writing file...\n", __func__);
+ gguf_write_to_file(ctx, fname.c_str(), false);
+ printf("%s: wrote file '%s'\n", __func__, fname.c_str());
+ gguf_free(ctx);
+}
+
+/**
+ * Load prompt files and completion file.
+ * Then format each pair of prompt + completion to make an entry.
+ */
+static int prepare_entries(gpt_params & params, train_context & ctx_train) {
+ // load prompts
+    std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
+    std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
+ if (positive_prompts.size() != negative_prompts.size()) {
+ fprintf(stderr, "number of positive and negative prompts must be equal\n");
+ return 1;
+ }
+ if (positive_prompts.empty()) {
+ fprintf(stderr, "must provide at least one prompt pair\n");
+ return 1;
+ }
+
+ // create templated prompts
+    std::vector<std::string> completions = ctrlvec_load_prompt_file(params.cvector_completions_file, false);
+ auto format_template = [](std::string persona, std::string suffix) {
+ // entry in positive/negative.txt must already be formatted i.e. "[INST] Act as if you're extremely happy. [/INST]"
+ return persona + " " + suffix;
+ };
+ for (size_t i = 0; i < positive_prompts.size(); ++i) {
+ for (int j = 0; j < std::min((int) completions.size(), params.n_completions); ++j) {
+ // TODO replicate the truncations done by the python implementation
+ ctx_train.positive_entries.push_back(format_template(positive_prompts[i], completions[j]));
+ ctx_train.negative_entries.push_back(format_template(negative_prompts[i], completions[j]));
+ }
+ }
+ return 0;
+}
+
+int main(int argc, char ** argv) {
+ gpt_params params;
+
+ if (!gpt_params_parse(argc, argv, params)) {
+ print_usage(argc, argv, params);
+ return 1;
+ }
+
+ if (params.n_pca_iterations % params.n_pca_batch != 0) {
+ fprintf(stderr, "PCA iterations must by multiply of PCA batch size\n");
+ return 1;
+ }
+
+ callback_data cb_data;
+
+ // pass the callback to the backend scheduler
+ // it will be executed for each node during the graph computation
+ params.cb_eval = cb_eval;
+ params.cb_eval_user_data = &cb_data;
+ params.warmup = false;
+
+ print_build_info();
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ // load the model to get hparams
+ llama_model * model;
+ llama_context * ctx;
+ std::tie(model, ctx) = llama_init_from_gpt_params(params);
+
+ // int n_ctx = llama_n_ctx(ctx);
+ int n_layers = llama_n_layer(model);
+ int n_embd = llama_n_embd(model);
+ // get model hint param (a.k.a model arch name)
+ char model_hint[128];
+ llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
+
+ // init train_context
+ train_context ctx_train(n_embd, n_layers);
+
+ // load and prepare entries for training
+ prepare_entries(params, ctx_train);
+
+    // we have to pretokenize everything up front, because otherwise we don't know how much memory to allocate for the diff tensors
+    std::vector<tokenized_prompt> tokenized_prompts;
+ size_t n_total_tokens = 0;
+ for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
+ tokenized_prompt t(ctx, ctx_train.positive_entries[i], ctx_train.negative_entries[i]);
+ n_total_tokens += 2 * t.max_seq_len;
+ tokenized_prompts.push_back(std::move(t));
+ }
+
+ std::cout << "n_total_tokens: " << n_total_tokens << std::endl;
+
+    for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
+ bool success = false;
+ tokenized_prompt t = tokenized_prompts[i];
+ cb_data.n_layers = n_layers;
+ cb_data.n_tokens = t.max_seq_len;
+
+ printf("Evaluating prompt[%d/%d]: \"%s\" - \"%s\" (%d tokens)\n",
+ (int) i+1, (int) ctx_train.positive_entries.size(),
+ tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
+ tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
+ (int) t.max_seq_len);
+
+ cb_data.is_eval_pos = true;
+ success = get_hidden_layers(ctx, t.tokens_pos);
+ if (!success) break;
+
+ cb_data.is_eval_pos = false;
+ success = get_hidden_layers(ctx, t.tokens_neg);
+ if (!success) break;
+
+ // calculate diff and remove all zero rows
+ auto v_diff_filtered = cb_data.calc_diff();
+
+ // save & concat the filtered v_diff to ctx_train
+ ctx_train.concat_diff_tmp(v_diff_filtered);
+
+ // reset for next iteration
+ cb_data.reset();
+ }
+
+    // done with the model, we can now free it to regain some memory
+    printf("Done evaluating prompts, unloading model...\n");
+ llama_free(ctx);
+ llama_free_model(model);
+
+ // prepare ctx_train for PCA
+ ctx_train.build_v_diff();
+
+ // run PCA
+ PCA::pca_params pca_params;
+ pca_params.n_threads = params.n_threads;
+ pca_params.n_batch = params.n_pca_batch;
+ pca_params.n_iterations = params.n_pca_iterations;
+ PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);
+
+ // write output vectors to gguf
+ export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint);
+
+ llama_backend_free();
+
+ return 0;
+}
diff --git a/examples/cvector-generator/negative.txt b/examples/cvector-generator/negative.txt
new file mode 100644
index 000000000..2ac3387f1
--- /dev/null
+++ b/examples/cvector-generator/negative.txt
@@ -0,0 +1 @@
+[INST] Act like a person who is extremely sad. [/INST]
\ No newline at end of file
diff --git a/examples/cvector-generator/pca.hpp b/examples/cvector-generator/pca.hpp
new file mode 100644
index 000000000..36eadaac2
--- /dev/null
+++ b/examples/cvector-generator/pca.hpp
@@ -0,0 +1,322 @@
+#include "common.h"
+#include "llama.h"
+#include "ggml.h"
+
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#include <cmath>
+#include <cstdio>
+#include <ctime>
+#include <iostream>
+#include <random>
+#include <string>
+#include <vector>
+
+#define DEBUG_POS 5
+
+static void print_debug_tensor(struct ggml_tensor * t, bool with_data = true) {
+ printf("%s: %s (%s): [%d, %d]\n", __func__, t->name, ggml_type_name(t->type), (int) t->ne[0], (int) t->ne[1]);
+ if (!with_data) return;
+ printf("%s: %s[0] = [", __func__, t->name);
+ for (size_t i = 0; i <= DEBUG_POS; i++) {
+ printf(" %f,", ggml_get_f32_nd(t, i, 0, 0, 0));
+ }
+ printf(" ... ]\n");
+}
+
+namespace PCA {
+
+// input params for PCA computations
+struct pca_params {
+ int n_threads = 1;
+    int n_batch = 20; // number of iterations to do in one batch; the larger the batch, the more memory is used
+ int n_iterations = 1000;
+ float tolerance = 1e-7;
+
+ // for debugging
+ int i_layer = 0;
+ int n_layers = 0;
+};
+
+// result from each iteration
+struct pca_result {
+ struct ggml_tensor * calculated_square = NULL;
+    std::vector<struct ggml_tensor *> eigenvectors;
+    std::vector<float> distances;
+};
+
+struct pca_model {
+ ggml_backend_t backend = NULL;
+ ggml_backend_buffer_t buffer;
+ struct ggml_context * ctx; // context to compute graph on target device
+ struct ggml_context * ctx_host; // host context to store results
+
+ // tensors on target device
+ struct ggml_tensor * dev_input;
+ struct ggml_tensor * dev_square;
+ struct ggml_tensor * dev_eigenvector;
+
+ pca_model(struct ggml_tensor * t_input) {
+#ifdef GGML_USE_CUDA
+ fprintf(stderr, "%s: using CUDA backend\n", __func__);
+ backend = ggml_backend_cuda_init(0); // init device 0
+ if (!backend) {
+ fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
+ }
+#endif
+
+// TODO: enable Metal support when support for GGML_OP_SQRT is added
+// #ifdef GGML_USE_METAL
+// fprintf(stderr, "%s: using Metal backend\n", __func__);
+// backend = ggml_backend_metal_init();
+// if (!backend) {
+// fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
+// }
+// #endif
+
+    // if there is no GPU backend, fall back to the CPU backend
+ if (!backend) {
+ backend = ggml_backend_cpu_init();
+ }
+
+ const int num_tensors = 4;
+ struct ggml_init_params params {
+ /*.mem_size =*/ ggml_tensor_overhead() * num_tensors,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+ ctx = ggml_init(params);
+
+ auto n_samples = t_input->ne[0];
+ auto n_embd = t_input->ne[1];
+
+ dev_input = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_samples, n_embd);
+ dev_square = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
+ dev_eigenvector = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
+
+ ggml_set_name(dev_input, "dev_input");
+ ggml_set_name(dev_square, "dev_square");
+ ggml_set_name(dev_eigenvector, "dev_eigenvector");
+ buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);
+ ggml_backend_tensor_set(dev_input, t_input->data, 0, ggml_nbytes(t_input));
+
+ // initialize eigenvector to random normalized vector
+ {
+            std::vector<float> random_vec(ggml_nelements(dev_eigenvector), 0.0);
+            std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
+            std::uniform_real_distribution<float> distribution(0.0, 1.0);
+ float sum_sqr = 0.0; // for normalizing random_vec
+ for (size_t i = 0; i < random_vec.size(); ++i) {
+ float f = distribution(generator);
+ sum_sqr += f * f;
+ random_vec[i] = f;
+ }
+ // normalize it
+ float random_vec_norm = std::sqrt(sum_sqr);
+ for (size_t i = 0; i < random_vec.size(); ++i) {
+ random_vec[i] /= random_vec_norm;
+ }
+ ggml_backend_tensor_set(dev_eigenvector, random_vec.data(), 0, ggml_nbytes(dev_eigenvector));
+ }
+ }
+
+ ~pca_model() {
+ ggml_free(ctx);
+ ggml_backend_buffer_free(buffer);
+ ggml_backend_free(backend);
+ }
+};
+
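+// build one batch of power-iteration steps as a single ggml graph.
+// power iteration estimates the dominant eigenvector of the square matrix
+// formed by multiplying the input with its own transpose (an n_embd x n_embd
+// covariance-like matrix, computed once on the first batch) by repeating
+//     b_{k+1} = S.b_k / ||S.b_k||
+// the distance between consecutive estimates is recorded so the caller can
+// pick the first estimate that converged below pca_params::tolerance;
+// unrolling params.n_batch steps into one graph amortizes the cost of each
+// backend dispatch.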
+static struct ggml_cgraph * build_graph_piter(
+ const struct pca_params & params,
+ const pca_model & model,
+ bool calc_square = false) {
+ GGML_ASSERT(params.n_batch > 0);
+ // TODO: buf_size must be able to scale with params.n_batch
+ static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
+    static std::vector<uint8_t> buf(buf_size);
+
+ struct ggml_init_params params0 = {
+ /*.mem_size =*/ buf_size,
+ /*.mem_buffer =*/ buf.data(),
+ /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
+ };
+    // create a temporary context to build the graph
+ struct ggml_context * ctx0 = ggml_init(params0);
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ // turn v_diff_original into square matrix if needed
+ struct ggml_tensor * tmp_square;
+ if (calc_square) {
+ tmp_square = ggml_mul_mat(ctx0, model.dev_input, model.dev_input);
+ ggml_set_name(tmp_square, "tmp_square");
+ }
+
+ struct ggml_tensor * b_tensor;
+ struct ggml_tensor * distance;
+ struct ggml_tensor * old_eigen = model.dev_eigenvector;
+ struct ggml_tensor * input_square = calc_square ? tmp_square : model.dev_square;
+
+ for (int i = 0; i < params.n_batch; ++i) {
+ // b_tensor = square * eigenvector^T
+ b_tensor = ggml_mul_mat(ctx0, input_square, old_eigen);
+ ggml_set_name(b_tensor, "b_tensor");
+
+ // normalize
+ b_tensor = ggml_div_inplace(ctx0,
+ b_tensor,
+ ggml_sqrt_inplace(ctx0, ggml_sum_rows(ctx0, ggml_sqr(ctx0, b_tensor)))
+ );
+ ggml_format_name(b_tensor, "b_tensor_norm_%d", i);
+
+ // calculate distance(new eigenvector - old eigenvector)
+ // we don't use ggml_sub because it may not be implemented on GPU backend
+ struct ggml_tensor * new_sub_old = ggml_add(ctx0, old_eigen, ggml_scale(ctx0, b_tensor, -1));
+ distance = ggml_sqrt_inplace(ctx0,
+ ggml_sum_rows(ctx0, ggml_sqr_inplace(ctx0, new_sub_old)));
+ ggml_format_name(distance, "distance_%d", i);
+
+ old_eigen = b_tensor;
+
+ // build operations nodes
+ ggml_build_forward_expand(gf, distance);
+ }
+
+    // delete the temporary context used to build the graph
+ ggml_free(ctx0);
+ return gf;
+}
+
+static ggml_status compute_piter(
+ const struct pca_params & params,
+ const pca_model & model,
+ struct ggml_cgraph * gf,
+ ggml_gallocr_t allocr,
+ struct pca_result & result) {
+ // allocate tensors
+ ggml_gallocr_alloc_graph(allocr, gf);
+
+ if (ggml_backend_is_cpu(model.backend)) {
+ ggml_backend_cpu_set_n_threads(model.backend, params.n_threads);
+ }
+
+// TODO: enable GPU support when support for GGML_OP_SQRT is added
+//#ifdef GGML_USE_METAL
+// if (ggml_backend_is_metal(model.backend)) {
+// ggml_backend_metal_set_n_cb(model.backend, params.n_threads);
+// }
+//#endif
+
+ ggml_status res = ggml_backend_graph_compute(model.backend, gf);
+ if (res == GGML_STATUS_SUCCESS) {
+ auto extract_i = [](std::string prefix, std::string str) -> int {
+ int i = -1;
+ if (str.rfind(prefix, 0) == 0) {
+ sscanf(str.c_str(), (prefix + "%d").c_str(), &i);
+ }
+ return i;
+ };
+ result.calculated_square = NULL;
+ result.eigenvectors.clear();
+ result.distances.clear();
+ result.eigenvectors.resize(params.n_batch);
+ result.distances.resize(params.n_batch);
+ // get output nodes
+ for (int i = 0; i < gf->n_nodes; ++i) {
+ auto node = gf->nodes[i];
+ int iter = -1;
+ // find b_tensor (without copying data from device)
+ if ((iter = extract_i("b_tensor_norm_", node->name)) > -1) {
+ result.eigenvectors[iter] = node;
+ }
+ // find distances, then copy data from device
+ if ((iter = extract_i("distance_", node->name)) > -1) {
+ float d;
+ ggml_backend_tensor_get(node, &d, 0, sizeof(float));
+ result.distances[iter] = d;
+ // std::cout << node->name << " = " << d << "\n";
+ }
+ // find tmp_square if it exists (without copying data from device)
+ if (std::string(node->name) == "tmp_square") {
+ result.calculated_square = node;
+ }
+ }
+ }
+ return res;
+}
+
+static void power_iteration(
+ const struct pca_params & params,
+ struct ggml_tensor * input, // shape of input: [n_samples, n_embd]
+ struct ggml_tensor * output) {
+ //printf("in power iteration\n");
+ struct pca_model model(input);
+
+ ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
+ struct pca_result result;
+ struct ggml_tensor * last_eigenvector = NULL;
+
+    int n_iters = params.n_iterations / params.n_batch; // a larger batch means fewer outer iterations
+ for (int iter = 0; iter < n_iters; ++iter) {
+ bool calc_square = (iter == 0); // only need to calculate square for first iteration
+ struct ggml_cgraph * gf = build_graph_piter(params, model, calc_square);
+ // ggml_graph_dump_dot(gf, nullptr, "/tmp/_cgraph.dot");
+ compute_piter(params, model, gf, allocr, result);
+
+ for (size_t k = 0; k < result.distances.size(); ++k) {
+ last_eigenvector = result.eigenvectors[k];
+ if (result.distances[k] < params.tolerance) {
+ break; // done
+ }
+ }
+
+ if (calc_square) {
+ // copy and store the square matrix if needed
+ GGML_ASSERT(result.calculated_square != NULL);
+ ggml_backend_tensor_copy(result.calculated_square, model.dev_square);
+ }
+
+ {
+ // copy last eigen vector and store as input for next iteration
+ GGML_ASSERT(last_eigenvector != NULL);
+ ggml_backend_tensor_copy(last_eigenvector, model.dev_eigenvector);
+ }
+
+ printf("%s: layer %d/%d, iteration: %d / total: %d (batch = %d) ...\n",
+ __func__, params.i_layer+1, params.n_layers, iter, n_iters, params.n_batch);
+ }
+
+ // get output tensor
+ GGML_ASSERT(last_eigenvector);
+ ggml_backend_tensor_get(last_eigenvector, output->data, 0, ggml_nbytes(last_eigenvector));
+ //print_debug_tensor(output);
+ ggml_gallocr_free(allocr);
+}
+
+static void run_pca(
+ struct pca_params & params,
+ const std::vector & v_input, // shape of v_input[0]: [n_samples, n_embd]
+ const std::vector & v_output) {
+ printf("%s: Running PCA...\n", __func__);
+ for (size_t il = 0; il < v_input.size(); ++il) {
+
+ // prepare output vector
+ struct ggml_tensor * ctrl_out = v_output[il];
+        ggml_format_name(ctrl_out, "direction.%zu", il+1);
+
+ // run power_iteration
+ params.i_layer = il;
+ params.n_layers = v_input.size();
+ power_iteration(params, v_input[il], ctrl_out);
+ printf("%s: Done layer %d / %d\n", __func__, (int) il+1, (int) v_input.size());
+ }
+}
+
+} // namespace PCA
diff --git a/examples/cvector-generator/positive.txt b/examples/cvector-generator/positive.txt
new file mode 100644
index 000000000..f28e9aa1a
--- /dev/null
+++ b/examples/cvector-generator/positive.txt
@@ -0,0 +1 @@
+[INST] Act like a person who is extremely happy. [/INST]
\ No newline at end of file
diff --git a/flake.lock b/flake.lock
index 7272e65fa..5278fb68a 100644
--- a/flake.lock
+++ b/flake.lock
@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
- "lastModified": 1717786204,
- "narHash": "sha256-4q0s6m0GUcN7q+Y2DqD27iLvbcd1G50T2lv08kKxkSI=",
+ "lastModified": 1718318537,
+ "narHash": "sha256-4Zu0RYRcAY/VWuu6awwq4opuiD//ahpc2aFHg2CWqFY=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "051f920625ab5aabe37c920346e3e69d7d34400e",
+ "rev": "e9ee548d90ff586a6471b4ae80ae9cfcbceb3420",
"type": "github"
},
"original": {
diff --git a/ggml-backend.c b/ggml-backend.c
index 2bec7bea3..26dce7f72 100644
--- a/ggml-backend.c
+++ b/ggml-backend.c
@@ -1172,7 +1172,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
// check if a backend with higher prio wants to offload the op
if (src_backend_id == sched->n_backends - 1) {
for (int b = 0; b < src_backend_id; b++) {
- if (ggml_backend_offload_op(sched->backends[b], tensor)) {
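+                // also check supports_op: offload_op alone does not guarantee that the backend can actually run the op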
+ if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
SET_CAUSE(tensor, "1.off");
return b;
}
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 593fa4cda..b8298ab20 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -2267,6 +2267,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
case GGML_OP_SQR:
ggml_cuda_op_sqr(ctx, dst);
break;
+ case GGML_OP_SQRT:
+ ggml_cuda_op_sqrt(ctx, dst);
+ break;
case GGML_OP_CLAMP:
ggml_cuda_op_clamp(ctx, dst);
break;
@@ -2830,6 +2833,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_OP_RMS_NORM:
case GGML_OP_SCALE:
case GGML_OP_SQR:
+ case GGML_OP_SQRT:
case GGML_OP_CLAMP:
case GGML_OP_CONT:
case GGML_OP_DIAG_MASK_INF:
diff --git a/ggml-cuda/mmvq.cu b/ggml-cuda/mmvq.cu
index 5f056e91e..e8d157169 100644
--- a/ggml-cuda/mmvq.cu
+++ b/ggml-cuda/mmvq.cu
@@ -117,7 +117,7 @@ static __global__ void mul_mat_vec_q(
tmp[j][i] = warp_reduce_sum(tmp[j][i]);
}
- if (threadIdx.x < rows_per_cuda_block) {
+ if (threadIdx.x < rows_per_cuda_block && (rows_per_cuda_block == 1 || row0 + threadIdx.x < nrows_dst)) {
dst[j*nrows_dst + row0 + threadIdx.x] = tmp[j][threadIdx.x];
}
}
diff --git a/ggml-cuda/unary.cu b/ggml-cuda/unary.cu
index a5ff96320..f9e208011 100644
--- a/ggml-cuda/unary.cu
+++ b/ggml-cuda/unary.cu
@@ -92,6 +92,15 @@ static __global__ void sqr_f32(const float * x, float * dst, const int k) {
dst[i] = x[i] * x[i];
}
+static __global__ void sqrt_f32(const float * x, float * dst, const int k) {
+ const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+ if (i >= k) {
+ return;
+ }
+ dst[i] = sqrtf(x[i]);
+}
+
static void gelu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
const int num_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE;
gelu_f32<<>>(x, dst, k);
@@ -142,6 +151,11 @@ static void sqr_f32_cuda(const float * x, float * dst, const int k, cudaStream_t
sqr_f32<<>>(x, dst, k);
}
+static void sqrt_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
+ const int num_blocks = (k + CUDA_SQRT_BLOCK_SIZE - 1) / CUDA_SQRT_BLOCK_SIZE;
+ sqrt_f32<<>>(x, dst, k);
+}
+
void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];
const float * src0_d = (const float *)src0->data;
@@ -284,3 +298,17 @@ void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
sqr_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
}
+
+void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const float * src0_d = (const float *)src0->data;
+ float * dst_d = (float *)dst->data;
+ cudaStream_t stream = ctx.stream();
+
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+ sqrt_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
+}
diff --git a/ggml-cuda/unary.cuh b/ggml-cuda/unary.cuh
index a1d07c04f..4cfb0479e 100644
--- a/ggml-cuda/unary.cuh
+++ b/ggml-cuda/unary.cuh
@@ -8,6 +8,7 @@
#define CUDA_HARDSIGMOID_BLOCK_SIZE 256
#define CUDA_HARDSWISH_BLOCK_SIZE 256
#define CUDA_SQR_BLOCK_SIZE 256
+#define CUDA_SQRT_BLOCK_SIZE 256
void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -28,3 +29,5 @@ void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/ggml-quants.c b/ggml-quants.c
index 9f864e5c4..0b346c11e 100644
--- a/ggml-quants.c
+++ b/ggml-quants.c
@@ -4,8 +4,6 @@
#include "ggml-quants.h"
#include "ggml-impl.h"
-#define GGML_COMMON_IMPL_C
-#include "ggml-common.h"
#include <math.h>
#include <string.h>
@@ -1078,6 +1076,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int64_t k)
}
vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]);
vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]);
+ }
#elif defined(__loongarch_asx)
for (int i = 0; i < nb; i++) {
@@ -1437,6 +1436,7 @@ void quantize_row_q8_1(const float * restrict x, void * restrict vy, int64_t k)
accv = vec_add(accv, vec_sld(accv, accv, 4));
accv = vec_add(accv, vec_sld(accv, accv, 8));
y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0));
+ }
#elif defined(__loongarch_asx)
for (int i = 0; i < nb; i++) {
@@ -4113,12 +4113,13 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector signed int v0 = vec_splats((int32_t)0);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
const vector signed char v8 = vec_splats((signed char)0x8);
vector float vsumf0 = vec_splats(0.0f);
-#pragma GCC unroll 4
+#pragma GCC unroll 8
for (int i = 0; i < nb; i++) {
__builtin_prefetch(x[i].qs, 0, 1);
__builtin_prefetch(y[i].qs, 0, 1);
@@ -4140,9 +4141,10 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r
vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
- qv0 = vec_add(qv0, qv1);
+ vector signed int vsumi0 = v0;
- vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
+ vsumi0 = vec_sum4s(qv0, vsumi0);
+ vsumi0 = vec_sum4s(qv1, vsumi0);
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
}
@@ -4516,6 +4518,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector signed int v0 = vec_splats((int32_t)0);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
vector float vsumf0 = vec_splats(0.0f);
@@ -4537,15 +4540,13 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r
vector signed char q8y0 = vec_xl( 0, y[i].qs);
vector signed char q8y1 = vec_xl(16, y[i].qs);
- vector signed char q4x0 = vec_and(qxs, lowMask);
- vector signed char q4x1 = vec_sr(qxs, v4);
+ vector unsigned char q4x0 = (vector unsigned char)vec_and(qxs, lowMask);
+ vector unsigned char q4x1 = (vector unsigned char)vec_sr(qxs, v4);
- vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
- vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
+ vector signed int vsumi0 = v0;
- qv0 = vec_add(qv0, qv1);
-
- vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
+ vsumi0 = vec_msum(q8y0, q4x0, vsumi0);
+ vsumi0 = vec_msum(q8y1, q4x1, vsumi0);
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
}
@@ -5247,6 +5248,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector signed int v0 = vec_splats((int32_t)0);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
vector float vsumf0 = vec_splats(0.0f);
@@ -5272,18 +5274,16 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r
vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs);
- vector signed char q5x0 = vec_or(vec_and(qxs, lowMask), qh0);
- vector signed char q5x1 = vec_or(vec_sr(qxs, v4), qh1);
+ vector unsigned char q5x0 = (vector unsigned char)vec_or(vec_and(qxs, lowMask), qh0);
+ vector unsigned char q5x1 = (vector unsigned char)vec_or(vec_sr(qxs, v4), qh1);
vector signed char q8y0 = vec_xl( 0, y[i].qs);
vector signed char q8y1 = vec_xl( 16, y[i].qs);
- vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0));
- vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1));
+ vector signed int vsumi0 = v0;
- qv0 = vec_add(qv0, qv1);
-
- vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
+ vsumi0 = vec_msum(q8y0, q5x0, vsumi0);
+ vsumi0 = vec_msum(q8y1, q5x1, vsumi0);
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
}
@@ -5523,9 +5523,10 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r
*s = sumf;
#elif defined(__POWER9_VECTOR__)
+ const vector signed int v0 = vec_splats((int32_t)0);
vector float vsumf0 = vec_splats(0.0f);
-#pragma GCC unroll 4
+#pragma GCC unroll 8
for (int i = 0; i < nb; i++) {
__builtin_prefetch(x[i].qs, 0, 1);
__builtin_prefetch(y[i].qs, 0, 1);
@@ -5544,13 +5545,13 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r
vector signed short qv2 = vec_mule(q8x1, q8y1);
vector signed short qv3 = vec_mulo(q8x1, q8y1);
- vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackh(qv1));
- vector signed int vsumi1 = vec_add(vec_unpackl(qv0), vec_unpackl(qv1));
- vector signed int vsumi2 = vec_add(vec_unpackh(qv2), vec_unpackh(qv3));
- vector signed int vsumi3 = vec_add(vec_unpackl(qv2), vec_unpackl(qv3));
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
- vsumi0 = vec_add(vsumi0, vsumi2);
- vsumi1 = vec_add(vsumi1, vsumi3);
+ vsumi0 = vec_sum4s(qv0, vsumi0);
+ vsumi1 = vec_sum4s(qv1, vsumi1);
+ vsumi0 = vec_sum4s(qv2, vsumi0);
+ vsumi1 = vec_sum4s(qv3, vsumi1);
vsumi0 = vec_add(vsumi0, vsumi1);
@@ -5938,6 +5939,7 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0x3);
const vector signed char lowScaleMask = vec_splats((signed char)0xF);
+ const vector int v0 = vec_splats((int32_t)0);
const vector unsigned char v2 = vec_splats((unsigned char)0x2);
const vector unsigned char v6 = vec_splats((unsigned char)0x6);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
@@ -5975,15 +5977,17 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+ vector signed int vsumi4 = v0;
+ vector signed int vsumi5 = v0;
+ vector signed int vsumi6 = v0;
+ vector signed int vsumi7 = v0;
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
for (int j = 0; j < QK_K/128; ++j) {
__builtin_prefetch(q2, 0, 1);
@@ -5993,14 +5997,14 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed char qxs1 = (vector signed char)vec_xl(16, q2);
q2 += 32;
- vector signed char q2x00 = vec_and(qxs0, lowMask);
- vector signed char q2x01 = vec_and(vec_sr(qxs0, v2), lowMask);
- vector signed char q2x02 = vec_and(vec_sr(qxs0, v4), lowMask);
- vector signed char q2x03 = vec_and(vec_sr(qxs0, v6), lowMask);
- vector signed char q2x10 = vec_and(qxs1, lowMask);
- vector signed char q2x11 = vec_and(vec_sr(qxs1, v2), lowMask);
- vector signed char q2x12 = vec_and(vec_sr(qxs1, v4), lowMask);
- vector signed char q2x13 = vec_and(vec_sr(qxs1, v6), lowMask);
+ vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask);
+ vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask);
+ vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask);
+ vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask);
+ vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask);
+ vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask);
+ vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask);
+ vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask);
vector signed char q8y00 = vec_xl( 0, q8);
vector signed char q8y10 = vec_xl( 16, q8);
@@ -6012,45 +6016,36 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed char q8y13 = vec_xl(112, q8);
q8 += 128;
- vector signed short qv0 = vec_add(vec_mule(q2x00, q8y00), vec_mulo(q2x00, q8y00));
- vector signed short qv1 = vec_add(vec_mule(q2x01, q8y01), vec_mulo(q2x01, q8y01));
- vector signed short qv2 = vec_add(vec_mule(q2x02, q8y02), vec_mulo(q2x02, q8y02));
- vector signed short qv3 = vec_add(vec_mule(q2x03, q8y03), vec_mulo(q2x03, q8y03));
- vector signed short qv4 = vec_add(vec_mule(q2x10, q8y10), vec_mulo(q2x10, q8y10));
- vector signed short qv5 = vec_add(vec_mule(q2x11, q8y11), vec_mulo(q2x11, q8y11));
- vector signed short qv6 = vec_add(vec_mule(q2x12, q8y12), vec_mulo(q2x12, q8y12));
- vector signed short qv7 = vec_add(vec_mule(q2x13, q8y13), vec_mulo(q2x13, q8y13));
+ vector signed int qv0 = vec_msum(q8y00, q2x00, v0);
+ vector signed int qv1 = vec_msum(q8y01, q2x01, v0);
+ vector signed int qv2 = vec_msum(q8y02, q2x02, v0);
+ vector signed int qv3 = vec_msum(q8y03, q2x03, v0);
+ vector signed int qv4 = vec_msum(q8y10, q2x10, v0);
+ vector signed int qv5 = vec_msum(q8y11, q2x11, v0);
+ vector signed int qv6 = vec_msum(q8y12, q2x12, v0);
+ vector signed int qv7 = vec_msum(q8y13, q2x13, v0);
- vector signed short vscales_h = vec_unpackh(vscales);
- vector signed short vs0 = vec_splat(vscales_h, 0);
- vector signed short vs1 = vec_splat(vscales_h, 1);
- vector signed short vs2 = vec_splat(vscales_h, 2);
- vector signed short vs3 = vec_splat(vscales_h, 3);
- vector signed short vs4 = vec_splat(vscales_h, 4);
- vector signed short vs5 = vec_splat(vscales_h, 5);
- vector signed short vs6 = vec_splat(vscales_h, 6);
- vector signed short vs7 = vec_splat(vscales_h, 7);
+ vector signed short vscales_07 = vec_unpackh(vscales);
+ vector signed int vscales_03 = vec_unpackh(vscales_07);
+ vector signed int vscales_47 = vec_unpackl(vscales_07);
+ vector signed int vs0 = vec_splat(vscales_03, 0);
+ vector signed int vs1 = vec_splat(vscales_03, 1);
+ vector signed int vs2 = vec_splat(vscales_03, 2);
+ vector signed int vs3 = vec_splat(vscales_03, 3);
+ vector signed int vs4 = vec_splat(vscales_47, 0);
+ vector signed int vs5 = vec_splat(vscales_47, 1);
+ vector signed int vs6 = vec_splat(vscales_47, 2);
+ vector signed int vs7 = vec_splat(vscales_47, 3);
vscales = vec_sld(vscales, vscales, 8);
- qv0 = vec_mul(qv0, vs0);
- qv1 = vec_mul(qv1, vs2);
- qv2 = vec_mul(qv2, vs4);
- qv3 = vec_mul(qv3, vs6);
-
- qv0 = vec_madd(qv4, vs1, qv0);
- qv1 = vec_madd(qv5, vs3, qv1);
- qv2 = vec_madd(qv6, vs5, qv2);
- qv3 = vec_madd(qv7, vs7, qv3);
-
- vsumi0 = vec_add(vec_unpackh(qv0), vsumi0);
- vsumi1 = vec_add(vec_unpackh(qv1), vsumi1);
- vsumi2 = vec_add(vec_unpackh(qv2), vsumi2);
- vsumi3 = vec_add(vec_unpackh(qv3), vsumi3);
-
- vsumi4 = vec_add(vec_unpackl(qv0), vsumi4);
- vsumi5 = vec_add(vec_unpackl(qv1), vsumi5);
- vsumi6 = vec_add(vec_unpackl(qv2), vsumi6);
- vsumi7 = vec_add(vec_unpackl(qv3), vsumi7);
+ vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0);
+ vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1);
+ vsumi2 = vec_add(vec_mul(qv2, vs4), vsumi2);
+ vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3);
+ vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4);
+ vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5);
+ vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6);
+ vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7);
}
vsumi0 = vec_add(vsumi0, vsumi4);
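
/*
 * Illustrative sketch, not part of the patch: because the q2_K products
 * now come out of vec_msum as 32-bit lanes, each sub-block scale is
 * sign-extended twice (char -> short -> int) before being splatted, and
 * the scaling becomes a plain 32-bit lane-wise multiply-accumulate:
 */
#include <stdint.h>
static inline void scale_acc_model(int8_t scale, const int32_t qv[4],
                                   int32_t acc[4]) {
    for (int i = 0; i < 4; ++i) {
        acc[i] += (int32_t)scale * qv[i]; // vec_add(vec_mul(qvN, vsN), vsumiN)
    }
}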
@@ -6641,6 +6636,9 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0x3);
+ const vector signed char lowMask1 = vec_splats((int8_t)0xf);
+ const vector signed char lowMask2 = vec_splats((int8_t)0x30);
+ const vector int v0 = vec_splats((int32_t)0);
const vector signed char v1 = vec_splats((signed char)0x1);
const vector unsigned char v2 = vec_splats((unsigned char)0x2);
const vector unsigned char v3 = vec_splats((unsigned char)0x3);
@@ -6658,30 +6656,33 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector float vyd = vec_splats(y[i].d);
vector float vd = vec_mul(vxd, vyd);
- uint32_t aux[3];
- uint32_t utmp[4];
+ UNUSED(kmask1);
+ UNUSED(kmask2);
- memcpy(aux, x[i].scales, 12);
- utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
- utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
- utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
- utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+ vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
+ vector signed char u1 = vec_and(u0, lowMask1);
+ vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
+ vector signed char u3 = (vector signed char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2));
+ vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4);
+ vector signed char u31 = vec_and(u3, lowMask2);
- vector signed char vscales = (vector signed char)vec_xl( 0, utmp);
+ u1 = vec_or(u1, u30);
+ u2 = vec_or(vec_sr(u0, v4), u31);
+
+ vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2);
vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask);
vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask);
vscales = vec_sub(vscales, off);
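
/*
 * Illustrative sketch, not part of the patch: scalar equivalent of the
 * vector scale decode above. q3_K packs 16 6-bit scales into 12 bytes
 * (low nibbles in bytes 0..7, high 2-bit fields in bytes 8..11); the
 * u0/u1/u2/u3 shuffles rebuild the same bytes the removed kmask1/kmask2
 * scalar code produced via utmp[]. Assumes off == 32, the usual q3_K
 * scale bias applied by vec_sub(vscales, off):
 */
#include <stdint.h>
static inline void q3k_scales_model(const uint8_t sc[12], int8_t out[16]) {
    for (int j = 0; j < 16; ++j) {
        const int lo = (j < 8) ? (sc[j] & 0xF) : (sc[j - 8] >> 4);
        const int hi = (sc[8 + (j % 4)] >> (2 * (j / 4))) & 0x3;
        out[j] = (int8_t)((hi << 4) | lo) - 32;
    }
}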
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
-
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+ vector signed int vsumi4 = v0;
+ vector signed int vsumi5 = v0;
+ vector signed int vsumi6 = v0;
+ vector signed int vsumi7 = v0;
const uint8_t * restrict q3 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
@@ -6755,23 +6756,14 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12));
vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13));
- vector signed int vsum0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0));
- vector signed int vsum1 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2));
- vector signed int vsum2 = vec_add(vec_mule(qv02, vs4), vec_mulo(qv02, vs4));
- vector signed int vsum3 = vec_add(vec_mule(qv03, vs6), vec_mulo(qv03, vs6));
- vector signed int vsum4 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1));
- vector signed int vsum5 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3));
- vector signed int vsum6 = vec_add(vec_mule(qv12, vs5), vec_mulo(qv12, vs5));
- vector signed int vsum7 = vec_add(vec_mule(qv13, vs7), vec_mulo(qv13, vs7));
-
- vsumi0 = vec_add(vsum0, vsumi0);
- vsumi1 = vec_add(vsum1, vsumi1);
- vsumi2 = vec_add(vsum2, vsumi2);
- vsumi3 = vec_add(vsum3, vsumi3);
- vsumi4 = vec_add(vsum4, vsumi4);
- vsumi5 = vec_add(vsum5, vsumi5);
- vsumi6 = vec_add(vsum6, vsumi6);
- vsumi7 = vec_add(vsum7, vsumi7);
+ vsumi0 = vec_msum(qv00, vs0, vsumi0);
+ vsumi1 = vec_msum(qv01, vs2, vsumi1);
+ vsumi2 = vec_msum(qv02, vs4, vsumi2);
+ vsumi3 = vec_msum(qv03, vs6, vsumi3);
+ vsumi4 = vec_msum(qv10, vs1, vsumi4);
+ vsumi5 = vec_msum(qv11, vs3, vsumi5);
+ vsumi6 = vec_msum(qv12, vs5, vsumi6);
+ vsumi7 = vec_msum(qv13, vs7, vsumi7);
}
vsumi0 = vec_add(vsumi0, vsumi4);
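
/*
 * Illustrative sketch, not part of the patch: the q3_K/q6_K and iq2/iq3
 * paths use the vec_msum(vector signed short, vector signed short,
 * vector signed int) form, so each pair of 16-bit product/scale lanes
 * collapses into one 32-bit lane and the old eight mule/mulo
 * accumulators shrink to four. Scalar model:
 */
#include <stdint.h>
static inline void msum_s16_model(const int16_t a[8], const int16_t b[8],
                                  int32_t acc[4]) {
    for (int i = 0; i < 4; ++i) {
        acc[i] += (int32_t)a[2*i] * b[2*i] + (int32_t)a[2*i + 1] * b[2*i + 1];
    }
}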
@@ -7270,6 +7262,10 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector signed char lowMask1 = vec_splats((int8_t)0x3f);
+ const vector signed char lowMask2 = vec_splats((int8_t)0x30);
+ const vector int v0 = vec_splats((int32_t)0);
+ const vector unsigned char v2 = vec_splats((uint8_t)2);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
vector float vsumf0 = vec_splats(0.0f);
@@ -7288,15 +7284,24 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
- memcpy(utmp, x[i].scales, 12);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
+ vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
+ vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2);
+ vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
+ vector signed char u3 = vec_sr(u2, v4);
+
+ vector signed char u30 = u1;
+ vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3);
+
+ u1 = vec_and(u0, lowMask1);
+ u2 = vec_or(u30, u31);
+
+ vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2);
- vector signed char utmps = (vector signed char)vec_xl( 0, utmp);
vector signed short vscales = vec_unpackh(utmps);
vector signed short q4xmins = vec_unpackl(utmps);
vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins);
@@ -7312,14 +7317,10 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
const uint8_t * restrict q4 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
@@ -7334,14 +7335,14 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed char qxs3 = (vector signed char)vec_xl(48, q4);
q4 += 64;
- vector signed char q4x00 = vec_and(qxs0, lowMask);
- vector signed char q4x01 = vec_sr(qxs0, v4);
- vector signed char q4x10 = vec_and(qxs1, lowMask);
- vector signed char q4x11 = vec_sr(qxs1, v4);
- vector signed char q4x20 = vec_and(qxs2, lowMask);
- vector signed char q4x21 = vec_sr(qxs2, v4);
- vector signed char q4x30 = vec_and(qxs3, lowMask);
- vector signed char q4x31 = vec_sr(qxs3, v4);
+ vector unsigned char q4x00 = (vector unsigned char)vec_and(qxs0, lowMask);
+ vector unsigned char q4x01 = (vector unsigned char)vec_sr(qxs0, v4);
+ vector unsigned char q4x10 = (vector unsigned char)vec_and(qxs1, lowMask);
+ vector unsigned char q4x11 = (vector unsigned char)vec_sr(qxs1, v4);
+ vector unsigned char q4x20 = (vector unsigned char)vec_and(qxs2, lowMask);
+ vector unsigned char q4x21 = (vector unsigned char)vec_sr(qxs2, v4);
+ vector unsigned char q4x30 = (vector unsigned char)vec_and(qxs3, lowMask);
+ vector unsigned char q4x31 = (vector unsigned char)vec_sr(qxs3, v4);
vector signed char q8y00 = vec_xl( 0, q8);
vector signed char q8y10 = vec_xl( 16, q8);
@@ -7353,41 +7354,33 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed char q8y31 = vec_xl(112, q8);
q8 += 128;
- vector signed short qv00 = vec_add(vec_mule(q4x00, q8y00), vec_mulo(q4x00, q8y00));
- vector signed short qv01 = vec_add(vec_mule(q4x01, q8y01), vec_mulo(q4x01, q8y01));
- vector signed short qv10 = vec_add(vec_mule(q4x10, q8y10), vec_mulo(q4x10, q8y10));
- vector signed short qv11 = vec_add(vec_mule(q4x11, q8y11), vec_mulo(q4x11, q8y11));
- vector signed short qv20 = vec_add(vec_mule(q4x20, q8y20), vec_mulo(q4x20, q8y20));
- vector signed short qv21 = vec_add(vec_mule(q4x21, q8y21), vec_mulo(q4x21, q8y21));
- vector signed short qv30 = vec_add(vec_mule(q4x30, q8y30), vec_mulo(q4x30, q8y30));
- vector signed short qv31 = vec_add(vec_mule(q4x31, q8y31), vec_mulo(q4x31, q8y31));
+ vector signed int qv00 = vec_msum(q8y00, q4x00, v0);
+ vector signed int qv01 = vec_msum(q8y01, q4x01, v0);
+ vector signed int qv10 = vec_msum(q8y10, q4x10, v0);
+ vector signed int qv11 = vec_msum(q8y11, q4x11, v0);
+ vector signed int qv20 = vec_msum(q8y20, q4x20, v0);
+ vector signed int qv21 = vec_msum(q8y21, q4x21, v0);
+ vector signed int qv30 = vec_msum(q8y30, q4x30, v0);
+ vector signed int qv31 = vec_msum(q8y31, q4x31, v0);
- vector signed short vs0 = vec_splat(vscales, 0);
- vector signed short vs1 = vec_splat(vscales, 1);
- vector signed short vs2 = vec_splat(vscales, 2);
- vector signed short vs3 = vec_splat(vscales, 3);
+ vector signed int vscales_h = vec_unpackh(vscales);
+ vector signed int vs0 = vec_splat(vscales_h, 0);
+ vector signed int vs1 = vec_splat(vscales_h, 1);
+ vector signed int vs2 = vec_splat(vscales_h, 2);
+ vector signed int vs3 = vec_splat(vscales_h, 3);
vscales = vec_sld(vscales, vscales, 8);
- qv00 = vec_add(qv00, qv10);
- qv10 = vec_add(qv01, qv11);
- qv20 = vec_add(qv20, qv30);
- qv30 = vec_add(qv21, qv31);
+ vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0);
+ vsumi1 = vec_add(vec_mul(qv01, vs1), vsumi1);
+ vsumi2 = vec_add(vec_mul(qv20, vs2), vsumi2);
+ vsumi3 = vec_add(vec_mul(qv21, vs3), vsumi3);
- vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0);
- vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1);
- vsumi2 = vec_add(vec_mule(qv10, vs1), vsumi2);
- vsumi3 = vec_add(vec_mulo(qv10, vs1), vsumi3);
- vsumi4 = vec_add(vec_mule(qv20, vs2), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv20, vs2), vsumi5);
- vsumi6 = vec_add(vec_mule(qv30, vs3), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv30, vs3), vsumi7);
+ vsumi0 = vec_add(vec_mul(qv10, vs0), vsumi0);
+ vsumi1 = vec_add(vec_mul(qv11, vs1), vsumi1);
+ vsumi2 = vec_add(vec_mul(qv30, vs2), vsumi2);
+ vsumi3 = vec_add(vec_mul(qv31, vs3), vsumi3);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -7889,6 +7882,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector signed char lowMask1 = vec_splats((int8_t)0x3f);
+ const vector signed char lowMask2 = vec_splats((int8_t)0x30);
+ const vector int v0 = vec_splats((int32_t)0);
const vector unsigned char v1 = vec_splats((unsigned char)0x1);
const vector unsigned char v2 = vec_splats((unsigned char)0x2);
const vector unsigned char v3 = vec_splats((unsigned char)0x3);
@@ -7907,18 +7903,27 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
vector float vdmin = vec_mul(vxmin, vyd);
- memcpy(utmp, x[i].scales, 12);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
+ vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
+ vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2);
+ vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
+ vector signed char u3 = vec_sr(u2, v4);
+
+ vector signed char u30 = u1;
+ vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3);
+
+ u1 = vec_and(u0, lowMask1);
+ u2 = vec_or(u30, u31);
+
+ vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2);
vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
- vector signed char utmps = (vector signed char)vec_xl( 0, utmp);
vector signed short vscales = vec_unpackh(utmps);
vector signed short q5xmins = vec_unpackl(utmps);
@@ -7938,10 +7943,10 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh);
vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
const uint8_t * restrict q5 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
@@ -7966,10 +7971,10 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
qxhs0 = vec_sr(qxhs0, v2);
qxhs1 = vec_sr(qxhs1, v2);
- vector signed char q5x00 = vec_or(q5h00, qxs00);
- vector signed char q5x01 = vec_or(q5h01, qxs01);
- vector signed char q5x10 = vec_or(q5h10, qxs10);
- vector signed char q5x11 = vec_or(q5h11, qxs11);
+ vector unsigned char q5x00 = (vector unsigned char)vec_or(q5h00, qxs00);
+ vector unsigned char q5x01 = (vector unsigned char)vec_or(q5h01, qxs01);
+ vector unsigned char q5x10 = (vector unsigned char)vec_or(q5h10, qxs10);
+ vector unsigned char q5x11 = (vector unsigned char)vec_or(q5h11, qxs11);
vector signed char q8y00 = vec_xl( 0, q8);
vector signed char q8y10 = vec_xl(16, q8);
@@ -7977,22 +7982,20 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed char q8y11 = vec_xl(48, q8);
q8 += 64;
- vector signed short qv00 = vec_add(vec_mule(q5x00, q8y00), vec_mulo(q5x00, q8y00));
- vector signed short qv01 = vec_add(vec_mule(q5x01, q8y01), vec_mulo(q5x01, q8y01));
- vector signed short qv10 = vec_add(vec_mule(q5x10, q8y10), vec_mulo(q5x10, q8y10));
- vector signed short qv11 = vec_add(vec_mule(q5x11, q8y11), vec_mulo(q5x11, q8y11));
+ vector signed int qv00 = vec_msum(q8y00, q5x00, v0);
+ vector signed int qv01 = vec_msum(q8y01, q5x01, v0);
+ vector signed int qv10 = vec_msum(q8y10, q5x10, v0);
+ vector signed int qv11 = vec_msum(q8y11, q5x11, v0);
- vector signed short vs0 = vec_splat(vscales, 0);
- vector signed short vs1 = vec_splat(vscales, 1);
+ vector signed int vscales_h = vec_unpackh(vscales);
+ vector signed int vs0 = vec_splat(vscales_h, 0);
+ vector signed int vs1 = vec_splat(vscales_h, 1);
vscales = vec_sld(vscales, vscales, 12);
- qv00 = vec_add(qv00, qv10);
- qv01 = vec_add(qv01, qv11);
-
- vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0);
- vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1);
- vsumi2 = vec_add(vec_mule(qv01, vs1), vsumi2);
- vsumi3 = vec_add(vec_mulo(qv01, vs1), vsumi3);
+ vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0);
+ vsumi1 = vec_add(vec_mul(qv10, vs0), vsumi1);
+ vsumi2 = vec_add(vec_mul(qv01, vs1), vsumi2);
+ vsumi3 = vec_add(vec_mul(qv11, vs1), vsumi3);
}
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
@@ -8553,6 +8556,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector int v0 = vec_splats((int32_t)0);
const vector unsigned char v2 = vec_splats((unsigned char)0x2);
const vector unsigned char v3 = vec_splats((unsigned char)0x3);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
@@ -8569,14 +8573,14 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector float vyd = vec_splats(y[i].d);
vector float vd = vec_mul(vxd, vyd);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+ vector signed int vsumi4 = v0;
+ vector signed int vsumi5 = v0;
+ vector signed int vsumi6 = v0;
+ vector signed int vsumi7 = v0;
const uint8_t * restrict q6 = x[i].ql;
const uint8_t * restrict qh = x[i].qh;
@@ -8656,23 +8660,14 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
vector signed short vs6 = vec_splat(vscales, 6);
vector signed short vs7 = vec_splat(vscales, 7);
- vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0);
- vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1);
- vsumi2 = vec_add(vec_mule(qv01, vs4), vsumi2);
- vsumi3 = vec_add(vec_mulo(qv01, vs4), vsumi3);
- vsumi4 = vec_add(vec_mule(qv10, vs1), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv10, vs1), vsumi5);
- vsumi6 = vec_add(vec_mule(qv11, vs5), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv11, vs5), vsumi7);
-
- vsumi0 = vec_add(vec_mule(qv20, vs2), vsumi0);
- vsumi1 = vec_add(vec_mulo(qv20, vs2), vsumi1);
- vsumi2 = vec_add(vec_mule(qv21, vs6), vsumi2);
- vsumi3 = vec_add(vec_mulo(qv21, vs6), vsumi3);
- vsumi4 = vec_add(vec_mule(qv30, vs3), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv30, vs3), vsumi5);
- vsumi6 = vec_add(vec_mule(qv31, vs7), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv31, vs7), vsumi7);
+ vsumi0 = vec_msum(qv00, vs0, vsumi0);
+ vsumi1 = vec_msum(qv01, vs4, vsumi1);
+ vsumi2 = vec_msum(qv10, vs1, vsumi2);
+ vsumi3 = vec_msum(qv11, vs5, vsumi3);
+ vsumi4 = vec_msum(qv20, vs2, vsumi4);
+ vsumi5 = vec_msum(qv21, vs6, vsumi5);
+ vsumi6 = vec_msum(qv30, vs3, vsumi6);
+ vsumi7 = vec_msum(qv31, vs7, vsumi7);
}
vsumi0 = vec_add(vsumi0, vsumi4);
@@ -8953,6 +8948,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void
*s = 0.125f * hsum_float_8(accumf);
#elif defined(__POWER9_VECTOR__)
+ const vector int v0 = vec_splats((int32_t)0);
vector float vsumf0 = vec_splats(0.0f);
vector float vsumf1 = vec_splats(0.0f);
vector float vsumf2 = vec_splats(0.0f);
@@ -8965,14 +8961,10 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void
vector float vyd = vec_splats(y[i].d);
vector float vd = vec_mul(vxd, vyd);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
const uint16_t * restrict q2 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
@@ -9019,21 +9011,12 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void
vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1));
vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1));
- vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
- vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
- vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
- vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
- vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
- vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -9425,6 +9408,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void *
*s = 0.125f * hsum_float_8(accumf);
#elif defined(__POWER9_VECTOR__)
+ const vector int v0 = vec_splats((int32_t)0);
vector float vsumf0 = vec_splats(0.0f);
vector float vsumf1 = vec_splats(0.0f);
vector float vsumf2 = vec_splats(0.0f);
@@ -9437,14 +9421,10 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void *
vector float vyd = vec_splats(y[i].d);
vector float vd = vec_mul(vxd, vyd);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
const uint16_t * restrict q2 = x[i].qs;
const uint8_t * restrict sc = x[i].scales;
@@ -9492,21 +9472,12 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void *
vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1));
vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1));
- vsumi0 = vec_add(vec_mule(qv0, vscales0), vsumi0);
- vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1);
- vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2);
- vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3);
- vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5);
- vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7);
+ vsumi0 = vec_msum(qv0, vscales0, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales1, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales2, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales3, vsumi3);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -9729,6 +9700,8 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void *
static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
+ const vector int v0 = vec_splats((int32_t)0);
+
vector float vsumf0 = vec_splats(0.0f);
vector float vsumf1 = vec_splats(0.0f);
vector float vsumf2 = vec_splats(0.0f);
@@ -9743,14 +9716,10 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void *
vector float vyd = vec_splats(y[i].d);
vector float vd = vec_mul(vxd, vyd);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
const uint8_t * restrict q2 = x[i].qs;
const uint8_t * restrict qh = x[i].qh;
@@ -9810,21 +9779,12 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void *
vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1));
vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1));
- vsumi0 = vec_add(vec_mule(qv0, vscales0), vsumi0);
- vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1);
- vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2);
- vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3);
- vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5);
- vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7);
+ vsumi0 = vec_msum(qv0, vscales0, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales1, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales2, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales3, vsumi3);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -10062,6 +10022,8 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void
#elif defined(__POWER9_VECTOR__)
const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+ const vector int v0 = vec_splats((int32_t)0);
+
vector float vsumf0 = vec_splats(0.0f);
vector float vsumf1 = vec_splats(0.0f);
vector float vsumf2 = vec_splats(0.0f);
@@ -10072,14 +10034,10 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void
vector float vyd = vec_splats(y[i].d);
vector float vd = vec_mul(vxd, vyd);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
const uint8_t * restrict q3 = x[i].qs;
const uint32_t * restrict signs = (const uint32_t *)(x[i].qs + QK_K/4);
@@ -10124,21 +10082,12 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void
vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
- vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
- vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
- vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
- vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
- vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
- vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -10428,6 +10377,8 @@ void ggml_vec_dot_iq3_s_q8_K (int n, float * restrict s, size_t bs, const void *
static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
+ const vector int v0 = vec_splats((int32_t)0);
+
vector float vsumf0 = vec_splats(0.0f);
vector float vsumf1 = vec_splats(0.0f);
vector float vsumf2 = vec_splats(0.0f);
@@ -10448,14 +10399,10 @@ void ggml_vec_dot_iq3_s_q8_K (int n, float * restrict s, size_t bs, const void *
const uint8_t * restrict sc = x[i].scales;
const int8_t * restrict q8 = y[i].qs;
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
for (int j = 0; j < QK_K/32; j += 2) {
__builtin_prefetch(q3, 0, 1);
@@ -10509,21 +10456,12 @@ void ggml_vec_dot_iq3_s_q8_K (int n, float * restrict s, size_t bs, const void *
vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
- vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
- vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
- vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
- vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
- vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
- vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -10804,10 +10742,6 @@ void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void
vector signed int vsumi1 = vec_splats((int32_t)0);
vector signed int vsumi2 = vec_splats((int32_t)0);
vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
vector signed int vsumi8 = vec_splats((int32_t)0);
const uint8_t * restrict q1 = x[i].qs;
@@ -10849,14 +10783,10 @@ void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void
vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
vector signed short vscales = vec_sld(vscales23, vscales01, 8);
- vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
- vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
- vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
- vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
- vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
- vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
vector signed short q8ysums = vec_xl_len(qs, 8);
qs += 4;
@@ -10871,11 +10801,6 @@ void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void
vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -11269,6 +11194,7 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void *
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector signed int v0 = vec_splats((int32_t)0);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
vector float vsumf0 = vec_splats(0.0f);
@@ -11299,8 +11225,11 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void *
vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
- vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
- vector signed int vsumi1 = vec_add(vec_unpackh(qv1), vec_unpackl(qv1));
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+
+ vsumi0 = vec_sum4s(qv0, vsumi0);
+ vsumi1 = vec_sum4s(qv1, vsumi1);
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
@@ -11455,6 +11384,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void *
#elif defined(__POWER9_VECTOR__)
const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector int v0 = vec_splats((int32_t)0);
const vector unsigned char v4 = vec_splats((unsigned char)0x4);
vector float vsumf0 = vec_splats(0.0f);
@@ -11470,14 +11400,10 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void *
vector float vyd = vec_splats(y[ibl].d);
vector float vd = vec_mul(vxd, vyd);
- vector signed int vsumi0 = vec_splats((int32_t)0);
- vector signed int vsumi1 = vec_splats((int32_t)0);
- vector signed int vsumi2 = vec_splats((int32_t)0);
- vector signed int vsumi3 = vec_splats((int32_t)0);
- vector signed int vsumi4 = vec_splats((int32_t)0);
- vector signed int vsumi5 = vec_splats((int32_t)0);
- vector signed int vsumi6 = vec_splats((int32_t)0);
- vector signed int vsumi7 = vec_splats((int32_t)0);
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
uint16_t h = x[ibl].scales_h;
@@ -11522,21 +11448,12 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void *
vector signed short vscales01 = vec_splats((int16_t)ls0);
vector signed short vscales23 = vec_splats((int16_t)ls1);
- vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
- vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
- vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
- vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
- vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
- vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
- vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
- vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
}
- vsumi0 = vec_add(vsumi0, vsumi4);
- vsumi1 = vec_add(vsumi1, vsumi5);
- vsumi2 = vec_add(vsumi2, vsumi6);
- vsumi3 = vec_add(vsumi3, vsumi7);
-
vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
@@ -13139,7 +13056,7 @@ static int iq1_find_best_neighbour(const uint16_t * restrict neighbours, const u
const float * restrict xval, const float * restrict weight, float * scale, int8_t * restrict L, int ngrid) {
int num_neighbors = neighbours[0];
GGML_ASSERT(num_neighbors > 0);
- float best_score = 0;
+ float best_score = -FLT_MAX;
int grid_index = -1;
for (int j = 1; j <= num_neighbors; ++j) {
const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
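
/*
 * Illustrative sketch, not part of the patch: why -FLT_MAX. With
 * best_score seeded to 0, a neighbour scoring exactly 0 (or a set whose
 * scores are all non-positive) never updates grid_index, which would
 * otherwise remain -1. Seeding with -FLT_MAX makes the first candidate
 * always win the initial comparison:
 */
#include <float.h>
static int argmax_model(const float * score, int n) {
    float best = -FLT_MAX;
    int   idx  = -1;
    for (int j = 0; j < n; ++j) {
        if (score[j] > best) { best = score[j]; idx = j; }
    }
    return idx; // idx >= 0 for any n > 0 with scores above -FLT_MAX
}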
@@ -13337,7 +13254,7 @@ static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy
sumw[j+1] = sumw[j] + weight[i];
}
}
- float best_score = 0, scale = max;
+ float best_score = -FLT_MIN, scale = max;
int besti1 = -1, besti2 = -1, best_shift = 0;
for (int i1 = 0; i1 <= block_size; ++i1) {
for (int i2 = i1; i2 <= block_size; ++i2) {
@@ -13513,7 +13430,7 @@ static void quantize_row_iq1_m_impl(const float * restrict x, void * restrict vy
idx[2*j] = j;
}
qsort(pairs, block_size, 2*sizeof(float), iq1_sort_helper);
- float best_score = 0, scale = max;
+ float best_score = -FLT_MIN, scale = max;
int besti1 = -1, besti2 = -1, best_k = -1;
// 0: +, +
// 1: +, -
diff --git a/ggml-rpc.cpp b/ggml-rpc.cpp
index 22d9524b8..b01ad2674 100644
--- a/ggml-rpc.cpp
+++ b/ggml-rpc.cpp
@@ -73,9 +73,13 @@ struct rpc_tensor {
uint64_t view_offs;
uint64_t data;
char name[GGML_MAX_NAME];
+
+ char padding[4];
};
#pragma pack(pop)
+static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of 8");
+
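
/*
 * Illustrative sketch, not part of the patch: the serialized graph lays
 * an array of rpc_tensor records out behind 8-byte node ids, so under
 * #pragma pack(1) the size must be rounded up to a multiple of 8 by an
 * explicit tail pad; the static_assert above turns a layout regression
 * into a compile error. Minimal reproduction of the pattern (field
 * names hypothetical):
 */
#include <cstdint>
#pragma pack(push, 1)
struct wire_record {
    uint64_t id;
    char     name[12];
    char     padding[4]; // round the packed size up to a multiple of 8
};
#pragma pack(pop)
static_assert(sizeof(wire_record) % 8 == 0, "wire_record size must be multiple of 8");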
// RPC commands
enum rpc_cmd {
ALLOC_BUFFER = 0,
@@ -599,9 +603,8 @@ static void serialize_graph(const ggml_cgraph * cgraph, std::vector<uint8_t> & output) {
int output_size = sizeof(uint32_t) + n_nodes * sizeof(uint64_t) + sizeof(uint32_t) + n_tensors * sizeof(rpc_tensor);
output.resize(output_size, 0);
memcpy(output.data(), &n_nodes, sizeof(n_nodes));
- uint64_t * out_nodes = (uint64_t *)(output.data() + sizeof(n_nodes));
for (uint32_t i = 0; i < n_nodes; i++) {
-        out_nodes[i] = reinterpret_cast<uint64_t>(cgraph->nodes[i]);
+ memcpy(output.data() + sizeof(n_nodes) + i * sizeof(uint64_t), &cgraph->nodes[i], sizeof(uint64_t));
}
uint32_t * out_ntensors = (uint32_t *)(output.data() + sizeof(n_nodes) + n_nodes * sizeof(uint64_t));
*out_ntensors = n_tensors;
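
/*
 * Illustrative sketch, not part of the patch: why the raw uint64_t*
 * store was replaced. output.data() + sizeof(n_nodes) is offset 4 bytes
 * from an aligned base, so it is generally misaligned for uint64_t, and
 * storing through a cast pointer is undefined behaviour; memcpy
 * expresses the same store safely, and compilers lower it to a plain
 * unaligned store where the target allows it:
 */
#include <stdint.h>
#include <string.h>
static inline void store_u64(uint8_t * dst, uint64_t v) {
    memcpy(dst, &v, sizeof v);
}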
@@ -1036,7 +1039,9 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
     std::unordered_map<uint64_t, ggml_tensor*> tensor_map;
for (uint32_t i = 0; i < n_nodes; i++) {
- graph->nodes[i] = create_node(nodes[i], ctx, tensor_ptrs, tensor_map);
+ int64_t id;
+ memcpy(&id, &nodes[i], sizeof(id));
+ graph->nodes[i] = create_node(id, ctx, tensor_ptrs, tensor_map);
}
ggml_status status = ggml_backend_graph_compute(backend, graph);
// output serialization format: | status (1 byte) |
diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 6f41ed272..6bd42b960 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -36,6 +36,8 @@
#include "ggml.h"
#include "ggml-backend-impl.h"
+#include "ggml-sycl/backend.hpp"
+
/*
Following definition copied from DPCT head files, which are used by ggml-sycl.cpp
*/
@@ -82,3020 +84,7 @@ Following definition copied from DPCT head files, which are used by ggml-sycl.cpp
#define __dpct_noinline__ __attribute__((noinline))
#endif
-
-std::string get_device_type_name(const sycl::device &Device) {
-    auto DeviceType = Device.get_info<sycl::info::device::device_type>();
- switch (DeviceType) {
- case sycl::info::device_type::cpu:
- return "cpu";
- case sycl::info::device_type::gpu:
- return "gpu";
- case sycl::info::device_type::host:
- return "host";
- case sycl::info::device_type::accelerator:
- return "acc";
- default:
- return "unknown";
- }
-}
-
-std::string get_device_backend_and_type(const sycl::device &device) {
- std::stringstream device_type;
- sycl::backend backend = device.get_backend();
- device_type << backend << ":" << get_device_type_name(device);
- return device_type.str();
-}
-
-namespace dpct
-{
- typedef sycl::queue *queue_ptr;
- typedef sycl::event *event_ptr;
- typedef char *device_ptr;
- typedef uint8_t byte_t;
-    typedef sycl::buffer<byte_t> buffer_t;
-
- /// SYCL default exception handler
- inline auto exception_handler = [](sycl::exception_list exceptions)
- {
- for (std::exception_ptr const &e : exceptions)
- {
- try
- {
- std::rethrow_exception(e);
- }
- catch (sycl::exception const &e)
- {
- std::cerr << "Caught asynchronous SYCL exception:" << std::endl
- << e.what() << std::endl
- << "Exception caught at file:" << __FILE__
- << ", line:" << __LINE__ << std::endl;
- }
- }
- };
-
- enum error_code
- {
- success = 0,
- default_error = 999
- };
-
- enum memcpy_direction
- {
- host_to_host,
- host_to_device,
- device_to_host,
- device_to_device,
- automatic
- };
-
- enum memory_region
- {
- global = 0, // device global memory
- constant, // device constant memory
- local, // device local memory
- shared, // memory which can be accessed by host and device
- };
-
- enum class library_data_t : unsigned char
- {
- real_float = 0,
- complex_float,
- real_double,
- complex_double,
- real_half,
- complex_half,
- real_bfloat16,
- complex_bfloat16,
- real_int4,
- complex_int4,
- real_uint4,
- complex_uint4,
- real_int8,
- complex_int8,
- real_uint8,
- complex_uint8,
- real_int16,
- complex_int16,
- real_uint16,
- complex_uint16,
- real_int32,
- complex_int32,
- real_uint32,
- complex_uint32,
- real_int64,
- complex_int64,
- real_uint64,
- complex_uint64,
- real_int8_4,
- real_int8_32,
- real_uint8_4,
- library_data_t_size
- };
-
-    template <typename T>
-    struct DataType
-    {
-        using T2 = T;
-    };
-    template <typename T>
-    struct DataType<sycl::vec<T, 2>>
-    {
-        using T2 = std::complex<T>;
-    };
-
- static void destroy_event(event_ptr event)
- {
- delete event;
- }
-
- static inline unsigned int get_tid()
- {
-#if defined(__linux__)
- return syscall(SYS_gettid);
-#elif defined(_WIN64)
- return GetCurrentThreadId();
-#else
-#error "Only support Windows and Linux."
-#endif
- }
-
- namespace detail
- {
- static void get_version(const sycl::device &dev, int &major, int &minor)
- {
- // Version string has the following format:
-            // a. OpenCL<space><major.minor><space><vendor-specific-information>
-            // b. <major.minor>
-            // c. <major> e.g gfx1030
- std::string ver;
-            ver = dev.get_info<sycl::info::device::version>();
- std::string::size_type i = 0;
- while (i < ver.size()) {
- if (isdigit(ver[i]))
- break;
- i++;
- }
- major = std::stoi(&(ver[i]));
- while (i < ver.size()) {
- if (ver[i] == '.')
- break;
- i++;
- }
- if (i < ver.size()) {
- // a. and b.
- i++;
- minor = std::stoi(&(ver[i]));
- } else {
- // c.
- minor = 0;
- }
- }
-
-        template <typename tag, typename T>
- class generic_error_type
- {
- public:
- generic_error_type() = default;
- generic_error_type(T value) : value{value} {}
- operator T() const { return value; }
-
- private:
- T value;
- };
-
- } // namespace detail
-
- /// Pitched 2D/3D memory data.
- class pitched_data
- {
- public:
- pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
- pitched_data(void *data, size_t pitch, size_t x, size_t y)
- : _data(data), _pitch(pitch), _x(x), _y(y) {}
-
- void *get_data_ptr() { return _data; }
- void set_data_ptr(void *data) { _data = data; }
-
- size_t get_pitch() { return _pitch; }
- void set_pitch(size_t pitch) { _pitch = pitch; }
-
- size_t get_x() { return _x; }
- void set_x(size_t x) { _x = x; };
-
- size_t get_y() { return _y; }
- void set_y(size_t y) { _y = y; }
-
- private:
- void *_data;
- size_t _pitch, _x, _y;
- };
-
- class device_info
- {
- public:
- // get interface
- const char *get_name() const { return _name; }
- char *get_name() { return _name; }
-        template <typename WorkItemSizesTy = sycl::range<3>,
-                  std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::range<3>> ||
-                                       std::is_same_v<WorkItemSizesTy, int *>,
-                                   int> = 0>
-        auto get_max_work_item_sizes() const
-        {
-            if constexpr (std::is_same_v<WorkItemSizesTy, sycl::range<3>>)
- return sycl::range<3>(_max_work_item_sizes_i[0],
- _max_work_item_sizes_i[1],
- _max_work_item_sizes_i[2]);
- else
- {
- return _max_work_item_sizes_i;
- }
- }
-        template <typename WorkItemSizesTy = sycl::range<3>,
-                  std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::range<3>> ||
-                                       std::is_same_v<WorkItemSizesTy, int *>,
-                                   int> = 0>
-        auto get_max_work_item_sizes()
-        {
-            if constexpr (std::is_same_v<WorkItemSizesTy, sycl::range<3>>)
- return sycl::range<3>(_max_work_item_sizes_i[0],
- _max_work_item_sizes_i[1],
- _max_work_item_sizes_i[2]);
- else
- {
- return _max_work_item_sizes_i;
- }
- }
- bool get_host_unified_memory() const { return _host_unified_memory; }
- int get_major_version() const { return _major; }
- int get_minor_version() const { return _minor; }
- int get_integrated() const { return _integrated; }
- int get_max_clock_frequency() const { return _frequency; }
- int get_max_compute_units() const { return _max_compute_units; }
- int get_max_work_group_size() const { return _max_work_group_size; }
- int get_max_sub_group_size() const { return _max_sub_group_size; }
- int get_max_work_items_per_compute_unit() const
- {
- return _max_work_items_per_compute_unit;
- }
- int get_max_register_size_per_work_group() const
- {
- return _max_register_size_per_work_group;
- }
-        template <typename ReturnT = size_t *,
-                  std::enable_if_t<std::is_same_v<ReturnT, size_t *> ||
-                                       std::is_same_v<ReturnT, int *>,
-                                   int> = 0>
-        auto get_max_nd_range_size() const
-        {
-            if constexpr (std::is_same_v<ReturnT, size_t *>)
- return _max_nd_range_size;
- else
- return _max_nd_range_size_i;
- }
-        template <typename ReturnT = size_t *,
-                  std::enable_if_t<std::is_same_v<ReturnT, size_t *> ||
-                                       std::is_same_v<ReturnT, int *>,
-                                   int> = 0>
-        auto get_max_nd_range_size()
-        {
-            if constexpr (std::is_same_v<ReturnT, size_t *>)
- return _max_nd_range_size;
- else
- return _max_nd_range_size_i;
- }
- size_t get_global_mem_size() const { return _global_mem_size; }
- size_t get_local_mem_size() const { return _local_mem_size; }
- size_t get_max_mem_alloc_size() const { return _max_mem_alloc_size; }
- /// Returns the maximum clock rate of device's global memory in kHz. If
- /// compiler does not support this API then returns default value 3200000 kHz.
- unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
- /// Returns the maximum bus width between device and memory in bits. If
- /// compiler does not support this API then returns default value 64 bits.
- unsigned int get_memory_bus_width() const { return _memory_bus_width; }
- uint32_t get_device_id() const { return _device_id; }
-        std::array<unsigned char, 16> get_uuid() const { return _uuid; }
- /// Returns global memory cache size in bytes.
- unsigned int get_global_mem_cache_size() const
- {
- return _global_mem_cache_size;
- }
-
- // set interface
- void set_name(const char *name)
- {
- size_t length = strlen(name);
- if (length < 256)
- {
- std::memcpy(_name, name, length + 1);
- }
- else
- {
- std::memcpy(_name, name, 255);
- _name[255] = '\0';
- }
- }
- void set_max_work_item_sizes(const sycl::range<3> max_work_item_sizes)
- {
- for (int i = 0; i < 3; ++i)
- _max_work_item_sizes_i[i] = max_work_item_sizes[i];
- }
- [[deprecated]] void
- set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes)
- {
- for (int i = 0; i < 3; ++i)
- {
- _max_work_item_sizes_i[i] = max_work_item_sizes[i];
- }
- }
- void set_host_unified_memory(bool host_unified_memory)
- {
- _host_unified_memory = host_unified_memory;
- }
- void set_major_version(int major) { _major = major; }
- void set_minor_version(int minor) { _minor = minor; }
- void set_integrated(int integrated) { _integrated = integrated; }
- void set_max_clock_frequency(int frequency) { _frequency = frequency; }
- void set_max_compute_units(int max_compute_units)
- {
- _max_compute_units = max_compute_units;
- }
- void set_global_mem_size(size_t global_mem_size)
- {
- _global_mem_size = global_mem_size;
- }
- void set_local_mem_size(size_t local_mem_size)
- {
- _local_mem_size = local_mem_size;
- }
- void set_max_mem_alloc_size(size_t max_mem_alloc_size)
- {
- _max_mem_alloc_size = max_mem_alloc_size;
- }
- void set_max_work_group_size(int max_work_group_size)
- {
- _max_work_group_size = max_work_group_size;
- }
- void set_max_sub_group_size(int max_sub_group_size)
- {
- _max_sub_group_size = max_sub_group_size;
- }
- void
- set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit)
- {
- _max_work_items_per_compute_unit = max_work_items_per_compute_unit;
- }
- void set_max_nd_range_size(int max_nd_range_size[])
- {
- for (int i = 0; i < 3; i++)
- {
- _max_nd_range_size[i] = max_nd_range_size[i];
- _max_nd_range_size_i[i] = max_nd_range_size[i];
- }
- }
- void set_memory_clock_rate(unsigned int memory_clock_rate)
- {
- _memory_clock_rate = memory_clock_rate;
- }
- void set_memory_bus_width(unsigned int memory_bus_width)
- {
- _memory_bus_width = memory_bus_width;
- }
- void
- set_max_register_size_per_work_group(int max_register_size_per_work_group)
- {
- _max_register_size_per_work_group = max_register_size_per_work_group;
- }
- void set_device_id(uint32_t device_id)
- {
- _device_id = device_id;
- }
-        void set_uuid(std::array<unsigned char, 16> uuid)
- {
- _uuid = std::move(uuid);
- }
- void set_global_mem_cache_size(unsigned int global_mem_cache_size)
- {
- _global_mem_cache_size = global_mem_cache_size;
- }
-
- private:
- char _name[256];
- int _max_work_item_sizes_i[3];
- bool _host_unified_memory = false;
- int _major;
- int _minor;
- int _integrated = 0;
- int _frequency;
- // Set estimated value 3200000 kHz as default value.
- unsigned int _memory_clock_rate = 3200000;
- // Set estimated value 64 bits as default value.
- unsigned int _memory_bus_width = 64;
- unsigned int _global_mem_cache_size;
- int _max_compute_units;
- int _max_work_group_size;
- int _max_sub_group_size;
- int _max_work_items_per_compute_unit;
- int _max_register_size_per_work_group;
- size_t _global_mem_size;
- size_t _local_mem_size;
- size_t _max_mem_alloc_size;
- size_t _max_nd_range_size[3];
- int _max_nd_range_size_i[3];
- uint32_t _device_id;
-        std::array<unsigned char, 16> _uuid;
- };
-
- static int get_major_version(const sycl::device &dev)
- {
- int major, minor;
- detail::get_version(dev, major, minor);
- return major;
- }
-
- static int get_minor_version(const sycl::device &dev)
- {
- int major, minor;
- detail::get_version(dev, major, minor);
- return minor;
- }
-
- static void get_device_info(device_info &out, const sycl::device &dev)
- {
- device_info prop;
- prop.set_name(dev.get_info<sycl::info::device::name>().c_str());
-
- int major, minor;
- detail::get_version(dev, major, minor);
- prop.set_major_version(major);
- prop.set_minor_version(minor);
-
- prop.set_max_work_item_sizes(
-#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION < 20220902)
- // oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes
- // is an enum class element
- dev.get_info<sycl::info::device::max_work_item_sizes>());
-#else
- // SYCL 2020-conformant code, max_work_item_sizes is a struct templated by
- // an int
- dev.get_info<sycl::info::device::max_work_item_sizes<3>>());
-#endif
- prop.set_host_unified_memory(dev.has(sycl::aspect::usm_host_allocations));
-
- prop.set_max_clock_frequency(
- dev.get_info<sycl::info::device::max_clock_frequency>() * 1000);
-
- prop.set_max_compute_units(
- dev.get_info<sycl::info::device::max_compute_units>());
- prop.set_max_work_group_size(
- dev.get_info<sycl::info::device::max_work_group_size>());
- prop.set_global_mem_size(dev.get_info<sycl::info::device::global_mem_size>());
- prop.set_local_mem_size(dev.get_info<sycl::info::device::local_mem_size>());
- prop.set_max_mem_alloc_size(dev.get_info<sycl::info::device::max_mem_alloc_size>());
-
-#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
- if (dev.has(sycl::aspect::ext_intel_memory_clock_rate))
- {
- unsigned int tmp =
- dev.get_info<sycl::ext::intel::info::device::memory_clock_rate>();
- if (tmp != 0)
- prop.set_memory_clock_rate(1000 * tmp);
- }
- if (dev.has(sycl::aspect::ext_intel_memory_bus_width))
- {
- prop.set_memory_bus_width(
- dev.get_info<sycl::ext::intel::info::device::memory_bus_width>());
- }
- if (dev.has(sycl::aspect::ext_intel_device_id))
- {
- prop.set_device_id(
- dev.get_info<sycl::ext::intel::info::device::device_id>());
- }
- if (dev.has(sycl::aspect::ext_intel_device_info_uuid))
- {
- prop.set_uuid(dev.get_info<sycl::ext::intel::info::device::uuid>());
- }
-#elif defined(_MSC_VER) && !defined(__clang__)
-#pragma message("get_device_info: querying memory_clock_rate and \
- memory_bus_width are not supported by the compiler used. \
- Use 3200000 kHz as memory_clock_rate default value. \
- Use 64 bits as memory_bus_width default value.")
-#else
-#warning "get_device_info: querying memory_clock_rate and \
- memory_bus_width are not supported by the compiler used. \
- Use 3200000 kHz as memory_clock_rate default value. \
- Use 64 bits as memory_bus_width default value."
-#endif
-
- size_t max_sub_group_size = 1;
- std::vector<size_t> sub_group_sizes =
- dev.get_info<sycl::info::device::sub_group_sizes>();
-
- for (const auto &sub_group_size : sub_group_sizes)
- {
- if (max_sub_group_size < sub_group_size)
- max_sub_group_size = sub_group_size;
- }
-
- prop.set_max_sub_group_size(max_sub_group_size);
-
- prop.set_max_work_items_per_compute_unit(
- dev.get_info<sycl::info::device::max_work_group_size>());
- int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
- prop.set_max_nd_range_size(max_nd_range_size);
-
- // Estimates max register size per work group, feel free to update the value
- // according to device properties.
- prop.set_max_register_size_per_work_group(65536);
-
- prop.set_global_mem_cache_size(
- dev.get_info<sycl::info::device::global_mem_cache_size>());
- out = prop;
- }
-
- /// dpct device extension
- class device_ext : public sycl::device
- {
- typedef std::mutex mutex_type;
-
- public:
- device_ext() : sycl::device(), _ctx(*this) {}
- ~device_ext()
- {
- std::lock_guard<mutex_type> lock(m_mutex);
- clear_queues();
- }
- device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this)
- {
- std::lock_guard<mutex_type> lock(m_mutex);
- init_queues();
- }
-
- int is_native_atomic_supported() { return 0; }
- int get_major_version() const
- {
- return dpct::get_major_version(*this);
- }
-
- int get_minor_version() const
- {
- return dpct::get_minor_version(*this);
- }
-
- int get_max_compute_units() const
- {
- return get_device_info().get_max_compute_units();
- }
-
- /// Return the maximum clock frequency of this device in KHz.
- int get_max_clock_frequency() const
- {
- return get_device_info().get_max_clock_frequency();
- }
-
- int get_integrated() const { return get_device_info().get_integrated(); }
-
- int get_max_sub_group_size() const
- {
- return get_device_info().get_max_sub_group_size();
- }
-
- int get_max_register_size_per_work_group() const
- {
- return get_device_info().get_max_register_size_per_work_group();
- }
-
- int get_max_work_group_size() const
- {
- return get_device_info().get_max_work_group_size();
- }
-
- int get_mem_base_addr_align() const
- {
- return get_info<sycl::info::device::mem_base_addr_align>();
- }
-
- size_t get_global_mem_size() const
- {
- return get_device_info().get_global_mem_size();
- }
-
- size_t get_max_mem_alloc_size() const
- {
- return get_device_info().get_max_mem_alloc_size();
- }
-
- /// Get the number of bytes of free and total memory on the SYCL device.
- /// \param [out] free_memory The number of bytes of free memory on the SYCL device.
- /// \param [out] total_memory The number of bytes of total memory on the SYCL device.
- void get_memory_info(size_t &free_memory, size_t &total_memory)
- {
- total_memory = get_device_info().get_global_mem_size();
- const char *warning_info = "get_memory_info: [warning] ext_intel_free_memory is not "
- "supported (export/set ZES_ENABLE_SYSMAN=1 to support), "
- "use total memory as free memory";
-#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
- if (!has(sycl::aspect::ext_intel_free_memory))
- {
- std::cerr << warning_info << std::endl;
- free_memory = total_memory;
- }
- else
- {
- free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
- }
-#else
- std::cerr << warning_info << std::endl;
- free_memory = total_memory;
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma message("Querying the number of bytes of free memory is not supported")
-#else
-#warning "Querying the number of bytes of free memory is not supported"
-#endif
-#endif
- }
-
- void get_device_info(device_info &out) const
- {
- dpct::get_device_info(out, *this);
- }
-
- device_info get_device_info() const
- {
- device_info prop;
- dpct::get_device_info(prop, *this);
- return prop;
- }
-
- void reset()
- {
- std::lock_guard<mutex_type> lock(m_mutex);
- clear_queues();
- init_queues();
- }
-
- sycl::queue &in_order_queue() { return *_q_in_order; }
-
- sycl::queue &out_of_order_queue() { return *_q_out_of_order; }
-
- sycl::queue &default_queue()
- {
- return in_order_queue();
- }
-
- void queues_wait_and_throw()
- {
- std::unique_lock<mutex_type> lock(m_mutex);
- std::vector<std::shared_ptr<sycl::queue>> current_queues(
- _queues);
- lock.unlock();
- for (const auto &q : current_queues)
- {
- q->wait_and_throw();
- }
- // Guard the destruct of current_queues to make sure the ref count is safe.
- lock.lock();
- }
-
- sycl::queue *create_queue(bool enable_exception_handler = false)
- {
- return create_in_order_queue(enable_exception_handler);
- }
-
- sycl::queue *create_queue(sycl::context context, sycl::device device,
- bool enable_exception_handler = false) {
- return create_in_order_queue(context, device, enable_exception_handler);
- }
-
- sycl::queue *create_in_order_queue(bool enable_exception_handler = false) {
- std::lock_guard<mutex_type> lock(m_mutex);
- return create_queue_impl(enable_exception_handler,
- sycl::property::queue::in_order());
- }
-
- sycl::queue *create_in_order_queue(sycl::context context, sycl::device device,
- bool enable_exception_handler = false) {
- std::lock_guard<mutex_type> lock(m_mutex);
- return create_queue_impl(context, device, enable_exception_handler,
- sycl::property::queue::in_order());
- }
-
- sycl::queue *create_out_of_order_queue(bool enable_exception_handler = false) {
- std::lock_guard<mutex_type> lock(m_mutex);
- return create_queue_impl(enable_exception_handler);
- }
-
- void destroy_queue(sycl::queue *&queue)
- {
- std::lock_guard<mutex_type> lock(m_mutex);
- _queues.erase(std::remove_if(_queues.begin(), _queues.end(),
- [=](const std::shared_ptr<sycl::queue> &q) -> bool
- {
- return q.get() == queue;
- }),
- _queues.end());
- queue = nullptr;
- }
- void set_saved_queue(sycl::queue *q)
- {
- std::lock_guard<mutex_type> lock(m_mutex);
- _saved_queue = q;
- }
- sycl::queue *get_saved_queue() const
- {
- std::lock_guard<mutex_type> lock(m_mutex);
- return _saved_queue;
- }
- sycl::context get_context() const { return _ctx; }
-
- private:
- void clear_queues()
- {
- _queues.clear();
- _q_in_order = _q_out_of_order = _saved_queue = nullptr;
- }
-
- void init_queues()
- {
- _q_in_order = create_queue_impl(true, sycl::property::queue::in_order());
- _q_out_of_order = create_queue_impl(true);
- _saved_queue = &default_queue();
- }
-
- /// Caller should acquire resource \p m_mutex before calling this function.
- template <class... Properties>
- sycl::queue *create_queue_impl(bool enable_exception_handler,
- Properties... properties)
- {
- sycl::async_handler eh = {};
- if (enable_exception_handler)
- {
- eh = exception_handler;
- }
- _queues.push_back(std::make_shared<sycl::queue>(
- _ctx, *this, eh,
- sycl::property_list(
-#ifdef DPCT_PROFILING_ENABLED
- sycl::property::queue::enable_profiling(),
-#endif
- properties...)));
-
- return _queues.back().get();
- }
-
- template <class... Properties>
- sycl::queue *create_queue_impl(sycl::context context, sycl::device device,
- bool enable_exception_handler,
- Properties... properties) {
- sycl::async_handler eh = {};
- if (enable_exception_handler) {
- eh = exception_handler;
- }
- _queues.push_back(std::make_shared<sycl::queue>(
- context, device, eh,
- sycl::property_list(
- #ifdef DPCT_PROFILING_ENABLED
- sycl::property::queue::enable_profiling(),
- #endif
- properties...)));
-
- return _queues.back().get();
- }
-
- void get_version(int &major, int &minor) const
- {
- detail::get_version(*this, major, minor);
- }
- sycl::queue *_q_in_order, *_q_out_of_order;
- sycl::queue *_saved_queue;
- sycl::context _ctx;
- std::vector<std::shared_ptr<sycl::queue>> _queues;
- mutable mutex_type m_mutex;
- };
-
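- // Singleton that enumerates all SYCL devices once, orders them by backend
- // and compute-unit count, and maps each host thread to a selected device id.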
- /// device manager
- class dev_mgr
- {
- public:
- device_ext &current_device()
- {
- unsigned int dev_id = current_device_id();
- check_id(dev_id);
- return *_devs[dev_id];
- }
- device_ext &cpu_device() const
- {
- std::lock_guard<std::recursive_mutex> lock(m_mutex);
- if (_cpu_device == -1)
- {
- throw std::runtime_error("no valid cpu device");
- }
- else
- {
- return *_devs[_cpu_device];
- }
- }
- device_ext &get_device(unsigned int id) const
- {
- std::lock_guard<std::recursive_mutex> lock(m_mutex);
- check_id(id);
- return *_devs[id];
- }
- unsigned int current_device_id() const
- {
- std::lock_guard<std::recursive_mutex> lock(m_mutex);
- auto it = _thread2dev_map.find(get_tid());
- if (it != _thread2dev_map.end())
- return it->second;
- return DEFAULT_DEVICE_ID;
- }
-
- /// Select device with a device ID.
- /// \param [in] id The id of the device which can
- /// be obtained through get_device_id(const sycl::device).
- void select_device(unsigned int id)
- {
- std::lock_guard<std::recursive_mutex> lock(m_mutex);
- check_id(id);
- _thread2dev_map[get_tid()] = id;
- }
- unsigned int device_count() { return _devs.size(); }
-
- unsigned int get_device_id(const sycl::device &dev)
- {
- unsigned int id = 0;
- for (auto dev_item : _devs)
- {
- if (*dev_item == dev)
- {
- break;
- }
- id++;
- }
- return id;
- }
-
- template <class DeviceSelector>
- std::enable_if_t<
- std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
- select_device(const DeviceSelector &selector = sycl::gpu_selector_v)
- {
- sycl::device selected_device = sycl::device(selector);
- unsigned int selected_device_id = get_device_id(selected_device);
- select_device(selected_device_id);
- }
-
- /// Returns the instance of device manager singleton.
- static dev_mgr &instance()
- {
- static dev_mgr d_m;
- return d_m;
- }
- dev_mgr(const dev_mgr &) = delete;
- dev_mgr &operator=(const dev_mgr &) = delete;
- dev_mgr(dev_mgr &&) = delete;
- dev_mgr &operator=(dev_mgr &&) = delete;
-
- private:
- mutable std::recursive_mutex m_mutex;
- static bool compare_dev(sycl::device &device1, sycl::device &device2)
- {
- dpct::device_info prop1;
- dpct::get_device_info(prop1, device1);
- dpct::device_info prop2;
- dpct::get_device_info(prop2, device2);
- return prop1.get_max_compute_units() > prop2.get_max_compute_units();
- }
- static int convert_backend_index(std::string & backend) {
- if (backend == "ext_oneapi_level_zero:gpu") return 0;
- if (backend == "opencl:gpu") return 1;
- if (backend == "ext_oneapi_cuda:gpu") return 2;
- if (backend == "ext_oneapi_hip:gpu") return 3;
- if (backend == "opencl:cpu") return 4;
- if (backend == "opencl:acc") return 5;
- printf("convert_backend_index: can't handle backend=%s\n", backend.c_str());
- GGML_ASSERT(false);
- }
- static bool compare_backend(std::string &backend1, std::string &backend2) {
- return convert_backend_index(backend1) < convert_backend_index(backend2);
- }
- dev_mgr()
- {
- sycl::device default_device =
- sycl::device(sycl::default_selector_v);
- _devs.push_back(std::make_shared<device_ext>(default_device));
-
- std::vector<sycl::device> sycl_all_devs;
- // Collect other devices except for the default device.
- if (default_device.is_cpu())
- _cpu_device = 0;
-
- auto Platforms = sycl::platform::get_platforms();
- // Keep track of the number of devices per backend
- std::map<sycl::backend, size_t> DeviceNums;
- std::map<std::string, std::vector<sycl::device>> backend_devices;
-
- while (!Platforms.empty()) {
- auto Platform = Platforms.back();
- Platforms.pop_back();
- auto devices = Platform.get_devices();
- std::string backend_type = get_device_backend_and_type(devices[0]);
- for (const auto &device : devices) {
- backend_devices[backend_type].push_back(device);
- }
- }
-
- std::vector<std::string> keys;
- for(auto it = backend_devices.begin(); it != backend_devices.end(); ++it) {
- keys.push_back(it->first);
- }
- std::sort(keys.begin(), keys.end(), compare_backend);
-
- for (auto &key : keys) {
- std::vector<sycl::device> devs = backend_devices[key];
- std::sort(devs.begin(), devs.end(), compare_dev);
- for (const auto &dev : devs) {
- sycl_all_devs.push_back(dev);
- }
- }
-
- for (auto &dev : sycl_all_devs)
- {
- if (dev == default_device)
- {
- continue;
- }
- _devs.push_back(std::make_shared<device_ext>(dev));
- if (_cpu_device == -1 && dev.is_cpu())
- {
- _cpu_device = _devs.size() - 1;
- }
- }
- }
- void check_id(unsigned int id) const
- {
- if (id >= _devs.size())
- {
- throw std::runtime_error("invalid device id");
- }
- }
- std::vector<std::shared_ptr<device_ext>> _devs;
- /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current
- /// thread id in _thread2dev_map, which means default device should be used
- /// for the current thread.
- const unsigned int DEFAULT_DEVICE_ID = 0;
- /// thread-id to device-id map.
- std::map<unsigned int, unsigned int> _thread2dev_map;
- int _cpu_device = -1;
- };
-
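- // Shorthand for the in-order default queue of the currently selected device.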
- static inline sycl::queue &get_default_queue()
- {
- return dev_mgr::instance().current_device().default_queue();
- }
-
- namespace detail
- {
- enum class pointer_access_attribute
- {
- host_only = 0,
- device_only,
- host_device,
- end
- };
-
- static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
- const void *ptr)
- {
- switch (sycl::get_pointer_type(ptr, q.get_context()))
- {
- case sycl::usm::alloc::unknown:
- return pointer_access_attribute::host_only;
- case sycl::usm::alloc::device:
- return pointer_access_attribute::device_only;
- case sycl::usm::alloc::shared:
- case sycl::usm::alloc::host:
- return pointer_access_attribute::host_device;
- }
- }
-
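- // Packs a sequence of library_data_t tags into one uint64_t, 8 bits per
- // tag, so a (to_type, from_type, ...) combination can be matched as a
- // single switch/compare key.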
- template <typename ArgT>
- inline constexpr std::uint64_t get_type_combination_id(ArgT Val)
- {
- static_assert((unsigned char)library_data_t::library_data_t_size <=
- std::numeric_limits<unsigned char>::max() &&
- "library_data_t size exceeds limit.");
- static_assert(std::is_same_v, "Unsupported ArgT");
- return (std::uint64_t)Val;
- }
-
- template <typename FirstT, typename... RestT>
- inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
- RestT... RestVal)
- {
- static_assert((std::uint8_t)library_data_t::library_data_t_size <=
- std::numeric_limits<unsigned char>::max() &&
- "library_data_t size exceeds limit.");
- static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
- static_assert(std::is_same_v, "Unsupported FirstT");
- return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
- }
-
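- // mem_mgr hands out "virtual" device pointers: a large PROT_NONE-reserved
- // address range is carved into fake pointers, each mapped to a
- // sycl::buffer allocation, so buffer-based code can still use pointer
- // arithmetic.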
- class mem_mgr
- {
- mem_mgr()
- {
- // Reserved address space, no real memory allocation happens here.
-#if defined(__linux__)
- mapped_address_space =
- (byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-#elif defined(_WIN64)
- mapped_address_space = (byte_t *)VirtualAlloc(
- NULL, // NULL specified as the base address parameter
- mapped_region_size, // Size of allocation
- MEM_RESERVE, // Allocate reserved pages
- PAGE_NOACCESS); // Protection = no access
-#else
-#error "Only support Windows and Linux."
-#endif
- next_free = mapped_address_space;
- };
-
- public:
- using buffer_id_t = int;
-
- struct allocation
- {
- buffer_t buffer;
- byte_t *alloc_ptr;
- size_t size;
- };
-
- ~mem_mgr()
- {
-#if defined(__linux__)
- munmap(mapped_address_space, mapped_region_size);
-#elif defined(_WIN64)
- VirtualFree(mapped_address_space, 0, MEM_RELEASE);
-#else
-#error "Only support Windows and Linux."
-#endif
- };
-
- mem_mgr(const mem_mgr &) = delete;
- mem_mgr &operator=(const mem_mgr &) = delete;
- mem_mgr(mem_mgr &&) = delete;
- mem_mgr &operator=(mem_mgr &&) = delete;
-
- /// Allocate
- void *mem_alloc(size_t size)
- {
- if (!size)
- return nullptr;
- std::lock_guard<std::mutex> lock(m_mutex);
- if (next_free + size > mapped_address_space + mapped_region_size)
- {
- throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool");
- }
- // Allocation
- sycl::range<1> r(size);
- buffer_t buf(r);
- allocation A{buf, next_free, size};
- // Map allocation to device pointer
- void *result = next_free;
- m_map.emplace(next_free + size, A);
- // Update pointer to the next free space.
- next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
-
- return result;
- }
-
- /// Deallocate
- void mem_free(const void *ptr)
- {
- if (!ptr)
- return;
- std::lock_guard<std::mutex> lock(m_mutex);
- auto it = get_map_iterator(ptr);
- m_map.erase(it);
- }
-
- /// map: device pointer -> allocation(buffer, alloc_ptr, size)
- allocation translate_ptr(const void *ptr)
- {
- std::lock_guard<std::mutex> lock(m_mutex);
- auto it = get_map_iterator(ptr);
- return it->second;
- }
-
- /// Check if the pointer represents device pointer or not.
- bool is_device_ptr(const void *ptr) const
- {
- std::lock_guard<std::mutex> lock(m_mutex);
- return (mapped_address_space <= ptr) &&
- (ptr < mapped_address_space + mapped_region_size);
- }
-
- /// Returns the instance of memory manager singleton.
- static mem_mgr &instance()
- {
- static mem_mgr m;
- return m;
- }
-
- private:
- std::map<byte_t *, allocation> m_map;
- mutable std::mutex m_mutex;
- byte_t *mapped_address_space;
- byte_t *next_free;
- const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
- const size_t alignment = 256;
- /// This padding may be defined to some positive value to debug
- /// out of bound accesses.
- const size_t extra_padding = 0;
-
- std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr)
- {
- auto it = m_map.upper_bound((byte_t *)ptr);
- if (it == m_map.end())
- {
- // Not a virtual pointer.
- throw std::runtime_error("can not get buffer from non-virtual pointer");
- }
- const allocation &alloc = it->second;
- if (ptr < alloc.alloc_ptr)
- {
- // Out of bound.
- // This may happen if there's a gap between allocations due to alignment
- // or extra padding and pointer points to this gap.
- throw std::runtime_error("invalid virtual pointer");
- }
- return it;
- }
- };
-
- template <class T, memory_region Memory, size_t Dimension>
- class accessor;
- template <memory_region Memory, class T = byte_t>
- class memory_traits
- {
- public:
- static constexpr sycl::access::target target =
- sycl::access::target::device;
- static constexpr sycl::access_mode mode =
- (Memory == constant) ? sycl::access_mode::read
- : sycl::access_mode::read_write;
- static constexpr size_t type_size = sizeof(T);
- using element_t =
- typename std::conditional<Memory == constant, const T, T>::type;
- using value_t = typename std::remove_cv<T>::type;
- template <size_t Dimension = 1>
- using accessor_t = typename std::conditional<
- Memory == local, sycl::local_accessor<value_t, Dimension>,
- sycl::accessor<T, Dimension, mode, target>>::type;
- using pointer_t = T *;
- };
-
- static inline void *dpct_malloc(size_t size, sycl::queue &q)
- {
- return sycl::malloc_device(size, q.get_device(), q.get_context());
- }
-
-#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
- static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
- sycl::queue &q)
- {
- pitch = PITCH_DEFAULT_ALIGN(x);
- return dpct_malloc(pitch * y * z, q);
- }
-
- /**
- * @brief Sets \p value to the first \p size elements starting from \p dev_ptr in \p q.
- * @tparam valueT The type of the element to be set.
- * @param [in] q The queue in which the operation is done.
- * @param [in] dev_ptr Pointer to the virtual device memory address.
- * @param [in] value The value to be set.
- * @param [in] size Number of elements to be set to the value.
- * @return An event representing the memset operation.
- */
- template <typename valueT>
- static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
- valueT value, size_t size)
- {
- return q.fill(dev_ptr, value, size);
- }
-
- /**
- * @brief Sets \p value to the 3D memory region pointed by \p data in \p q.
- * @tparam valueT The type of the element to be set.
- * @param [in] q The queue in which the operation is done.
- * @param [in] data Pointer to the pitched device memory region.
- * @param [in] value The value to be set.
- * @param [in] size 3D memory region by number of elements.
- * @return An event list representing the memset operations.
- */
- template <typename valueT>
- static inline std::vector<sycl::event>
- dpct_memset(sycl::queue &q, pitched_data data, valueT value,
- sycl::range<3> size)
- {
- std::vector event_list;
- size_t slice = data.get_pitch() * data.get_y();
- unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
- for (size_t z = 0; z < size.get(2); ++z)
- {
- unsigned char *data_ptr = data_surface;
- for (size_t y = 0; y < size.get(1); ++y)
- {
- event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
- data_ptr += data.get_pitch();
- }
- data_surface += slice;
- }
- return event_list;
- }
-
- /**
- * @brief Sets \p val to the pitched 2D memory region pointed by \p ptr in \p q.
- * @tparam valueT The type of the element to be set.
- * @param [in] q The queue in which the operation is done.
- * @param [in] ptr Pointer to the virtual device memory.
- * @param [in] pitch The pitch size by number of elements, including padding.
- * @param [in] val The value to be set.
- * @param [in] x The width of memory region by number of elements.
- * @param [in] y The height of memory region by number of elements.
- * @return An event list representing the memset operations.
- */
- template <typename valueT>
- static inline std::vector<sycl::event>
- dpct_memset(sycl::queue &q, void *ptr, size_t pitch, valueT val, size_t x,
- size_t y)
- {
- return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
- sycl::range<3>(x, y, 1));
- }
-
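- // Resolves memcpy_direction::automatic by classifying both pointers via
- // sycl::get_pointer_type and indexing a [to][from] lookup table.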
- static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
- const void *from_ptr,
- memcpy_direction dir)
- {
- switch (dir)
- {
- case memcpy_direction::host_to_host:
- case memcpy_direction::host_to_device:
- case memcpy_direction::device_to_host:
- case memcpy_direction::device_to_device:
- return dir;
- case memcpy_direction::automatic:
- {
- // table[to_attribute][from_attribute]
- static const memcpy_direction
- direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
- [static_cast<unsigned>(pointer_access_attribute::end)] =
- {{memcpy_direction::host_to_host,
- memcpy_direction::device_to_host,
- memcpy_direction::host_to_host},
- {memcpy_direction::host_to_device,
- memcpy_direction::device_to_device,
- memcpy_direction::device_to_device},
- {memcpy_direction::host_to_host,
- memcpy_direction::device_to_device,
- memcpy_direction::device_to_device}};
- return direction_table[static_cast<unsigned>(get_pointer_attribute(
- q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
- }
- default:
- throw std::runtime_error("dpct_memcpy: invalid direction value");
- }
- }
-
- static sycl::event
- dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
- memcpy_direction direction,
- const std::vector<sycl::event> &dep_events = {})
- {
- if (!size)
- return sycl::event{};
- return q.memcpy(to_ptr, from_ptr, size, dep_events);
- GGML_UNUSED(direction);
- }
-
- // Get actual copy range and make sure it will not exceed range.
- static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
- size_t pitch)
- {
- return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
- }
-
- static inline size_t get_offset(sycl::id<3> id, size_t slice,
- size_t pitch)
- {
- return slice * id.get(2) + pitch * id.get(1) + id.get(0);
- }
-
- /// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
- /// and \p from_range to another specified by \p to_ptr and \p to_range.
- static inline std::vector<sycl::event>
- dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
- sycl::range<3> to_range, sycl::range<3> from_range,
- sycl::id<3> to_id, sycl::id<3> from_id,
- sycl::range<3> size, memcpy_direction direction,
- const std::vector<sycl::event> &dep_events = {})
- {
- // RAII for host pointer
- class host_buffer
- {
- void *_buf;
- size_t _size;
- sycl::queue &_q;
- const std::vector<sycl::event> &_deps; // free operation depends
-
- public:
- host_buffer(size_t size, sycl::queue &q,
- const std::vector<sycl::event> &deps)
- : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
- void *get_ptr() const { return _buf; }
- size_t get_size() const { return _size; }
- ~host_buffer()
- {
- if (_buf)
- {
- _q.submit([&](sycl::handler &cgh)
- {
- cgh.depends_on(_deps);
- cgh.host_task([buf = _buf] { std::free(buf); }); });
- }
- }
- };
- std::vector<sycl::event> event_list;
-
- size_t to_slice = to_range.get(1) * to_range.get(0),
- from_slice = from_range.get(1) * from_range.get(0);
- unsigned char *to_surface =
- (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
- const unsigned char *from_surface =
- (const unsigned char *)from_ptr +
- get_offset(from_id, from_slice, from_range.get(0));
-
- if (to_slice == from_slice && to_slice == size.get(1) * size.get(0))
- {
- return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
- direction, dep_events)};
- }
- direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
- size_t size_slice = size.get(1) * size.get(0);
- switch (direction)
- {
- case host_to_host:
- for (size_t z = 0; z < size.get(2); ++z)
- {
- unsigned char *to_ptr = to_surface;
- const unsigned char *from_ptr = from_surface;
- if (to_range.get(0) == from_range.get(0) &&
- to_range.get(0) == size.get(0))
- {
- event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
- direction, dep_events));
- }
- else
- {
- for (size_t y = 0; y < size.get(1); ++y)
- {
- event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
- direction, dep_events));
- to_ptr += to_range.get(0);
- from_ptr += from_range.get(0);
- }
- }
- to_surface += to_slice;
- from_surface += from_slice;
- }
- break;
- case host_to_device:
- {
- host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
- event_list);
- std::vector<sycl::event> host_events;
- if (to_slice == size_slice)
- {
- // Copy host data to a temp host buffer with the shape of target.
- host_events =
- dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
- sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
- host_to_host, dep_events);
- }
- else
- {
- // Copy host data to a temp host buffer with the shape of target.
- host_events = dpct_memcpy(
- q, buf.get_ptr(), from_surface, to_range, from_range,
- sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
- // If has padding data, not sure whether it is useless. So fill temp
- // buffer with it.
- std::vector<sycl::event>{
- dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
- device_to_host, dep_events)});
- }
- // Copy from temp host buffer to device with only one submit.
- event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
- buf.get_size(), host_to_device,
- host_events));
- break;
- }
- case device_to_host:
- {
- host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
- event_list);
- // Copy from host temp buffer to host target with reshaping.
- event_list = dpct_memcpy(
- q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
- sycl::id<3>(0, 0, 0), size, host_to_host,
- // Copy from device to temp host buffer with only one submit.
- std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
- buf.get_size(),
- device_to_host, dep_events)});
- break;
- }
- case device_to_device:
- event_list.push_back(q.submit([&](sycl::handler &cgh){
- cgh.depends_on(dep_events);
- cgh.parallel_for<class dpct_memcpy_3d_detail>(
- size,
- [=](sycl::id<3> id) {
- to_surface[get_offset(id, to_slice, to_range.get(0))] =
- from_surface[get_offset(id, from_slice, from_range.get(0))];
- }); }));
- break;
- default:
- throw std::runtime_error("dpct_memcpy: invalid direction value");
- }
- return event_list;
- }
-
- /// memcpy 2D/3D matrix specified by pitched_data.
- static inline std::vector<sycl::event>
- dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
- pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
- memcpy_direction direction = automatic)
- {
- return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
- sycl::range<3>(to.get_pitch(), to.get_y(), 1),
- sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
- size, direction);
- }
-
- /// memcpy 2D matrix with pitch.
- static inline std::vector<sycl::event>
- dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
- size_t to_pitch, size_t from_pitch, size_t x, size_t y,
- memcpy_direction direction = automatic)
- {
- return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
- sycl::range<3>(from_pitch, y, 1),
- sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
- sycl::range<3>(x, y, 1), direction);
- }
-
- namespace deprecated
- {
-
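- // std::allocator-compatible wrapper over sycl::usm_allocator; kept in a
- // "deprecated" namespace for code migrated from older dpct releases.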
- template <typename T, sycl::usm::alloc AllocKind>
- class usm_allocator
- {
- private:
- using Alloc = sycl::usm_allocator<T, AllocKind>;
- Alloc _impl;
-
- public:
- using value_type = typename std::allocator_traits<Alloc>::value_type;
- using pointer = typename std::allocator_traits<Alloc>::pointer;
- using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
- using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
- using const_void_pointer =
- typename std::allocator_traits<Alloc>::const_void_pointer;
- using reference = typename std::allocator_traits<Alloc>::value_type &;
- using const_reference =
- const typename std::allocator_traits<Alloc>::value_type &;
- using difference_type =
- typename std::allocator_traits<Alloc>::difference_type;
- using size_type = typename std::allocator_traits<Alloc>::size_type;
- using propagate_on_container_copy_assignment = typename std::allocator_traits<
- Alloc>::propagate_on_container_copy_assignment;
- using propagate_on_container_move_assignment = typename std::allocator_traits<
- Alloc>::propagate_on_container_move_assignment;
- using propagate_on_container_swap =
- typename std::allocator_traits<Alloc>::propagate_on_container_swap;
- using is_always_equal =
- typename std::allocator_traits<Alloc>::is_always_equal;
-
- template <typename U>
- struct rebind
- {
- typedef usm_allocator<U, AllocKind> other;
- };
-
- usm_allocator() : _impl(dpct::get_default_queue()) {}
- ~usm_allocator() {}
- usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
- usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
- pointer address(reference r) { return &r; }
- const_pointer address(const_reference r) { return &r; }
- pointer allocate(size_type cnt, const_void_pointer hint = nullptr)
- {
- return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
- }
- void deallocate(pointer p, size_type cnt)
- {
- std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
- }
- size_type max_size() const
- {
- return std::allocator_traits<Alloc>::max_size(_impl);
- }
- bool operator==(const usm_allocator &other) const { return _impl == other._impl; }
- bool operator!=(const usm_allocator &other) const { return _impl != other._impl; }
- };
-
- } // namespace deprecated
-
- inline void dpct_free(void *ptr,
- const sycl::queue &q)
- {
- if (ptr)
- {
- sycl::free(ptr, q.get_context());
- }
- }
-
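- // Reinterprets a type-erased pointer as T*; constness is cast away on
- // purpose so the oneMKL wrappers below can consume raw void pointers.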
- template <typename T>
- inline auto get_memory(const void *x)
- {
- T *new_x = reinterpret_cast<T *>(const_cast<void *>(x));
- return new_x;
- }
-
- template <typename T>
- inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q)
- {
- using Ty = typename DataType<T>::T2;
- Ty s_h;
- if (get_pointer_attribute(q, s) == pointer_access_attribute::device_only)
- detail::dpct_memcpy(q, (void *)&s_h, (const void *)s, sizeof(T), device_to_host)
- .wait();
- else
- s_h = *reinterpret_cast<const Ty *>(s);
- return s_h;
- }
-
- } // namespace detail
-
- template <typename T>
- inline auto get_value(const T *s, sycl::queue &q)
- {
- return detail::get_value(s, q);
- }
-
- namespace detail
- {
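- // Thin shim over oneapi::mkl::blas::column_major::gemm: fetches
- // alpha/beta (which may live in device memory) and casts the raw
- // pointers to the Ta/Tb/Tc element types oneMKL expects.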
- template <class Ta, class Tb, class Tc, class Ts>
- inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
- oneapi::mkl::transpose b_trans, int m, int n, int k,
- const void *alpha, const void *a, int lda, const void *b,
- int ldb, const void *beta, void *c, int ldc)
- {
- Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
- Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
- auto data_a = get_memory<const Ta>(a);
- auto data_b = get_memory<const Tb>(b);
- auto data_c = get_memory<Tc>(c);
- oneapi::mkl::blas::column_major::gemm(
- q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
- data_b, ldb, beta_value, data_c, ldc);
- }
-
- template <typename VecT, class BinaryOperation, class = void>
- class vectorized_binary
- {
- public:
- inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op)
- {
- VecT v4;
- for (size_t i = 0; i < v4.size(); ++i)
- {
- v4[i] = binary_op(a[i], b[i]);
- }
- return v4;
- }
- };
-
- template <typename VecT, class BinaryOperation>
- class vectorized_binary<
- VecT, BinaryOperation,
- std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>>
- {
- public:
- inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op)
- {
- return binary_op(a, b).template as<VecT>();
- }
- };
-
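- // Grouped-batch gemm: a single group of batch_size problems; the
- // parameter block is heap-allocated and freed by a host_task once the
- // oneMKL event completes, so the submission stays asynchronous.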
- template <class Ta, class Tb, class Tc, class Ts>
- inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
- oneapi::mkl::transpose b_trans, int m, int n, int k,
- const void *alpha, const void **a, int lda,
- const void **b, int ldb, const void *beta, void **c,
- int ldc, int batch_size)
- {
- struct matrix_info_t
- {
- oneapi::mkl::transpose transpose_info[2];
- Ts value_info[2];
- std::int64_t size_info[3];
- std::int64_t ld_info[3];
- std::int64_t groupsize_info;
- };
-
- Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
- Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
-
- matrix_info_t *matrix_info =
- (matrix_info_t *)std::malloc(sizeof(matrix_info_t));
- matrix_info->transpose_info[0] = a_trans;
- matrix_info->transpose_info[1] = b_trans;
- matrix_info->value_info[0] = alpha_value;
- matrix_info->value_info[1] = beta_value;
- matrix_info->size_info[0] = m;
- matrix_info->size_info[1] = n;
- matrix_info->size_info[2] = k;
- matrix_info->ld_info[0] = lda;
- matrix_info->ld_info[1] = ldb;
- matrix_info->ld_info[2] = ldc;
- matrix_info->groupsize_info = batch_size;
-
- sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
- q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
- matrix_info->size_info, matrix_info->size_info + 1,
- matrix_info->size_info + 2, matrix_info->value_info,
- reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
- reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
- matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
- matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
-
- q.submit([&](sycl::handler &cgh)
- {
- cgh.depends_on(e);
- cgh.host_task([=] { std::free(matrix_info); }); });
- }
-
- template <class Ta, class Tb, class Tc, class Ts>
- inline void
- gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
- oneapi::mkl::transpose b_trans, int m, int n,
- int k, const void *alpha, const void *a, int lda,
- long long int stride_a, const void *b, int ldb,
- long long int stride_b, const void *beta, void *c,
- int ldc, long long int stride_c, int batch_size)
- {
- Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
- Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
- auto data_a = get_memory<const Ta>(a);
- auto data_b = get_memory<const Tb>(b);
- auto data_c = get_memory<Tc>(c);
- oneapi::mkl::blas::column_major::gemm_batch(
- q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
- stride_a, data_b, ldb, stride_b, beta_value,
- data_c, ldc, stride_c, batch_size);
- }
-
- } // namespace detail
-
- template <typename VecT, class BinaryOperation>
- inline unsigned vectorized_binary(unsigned a, unsigned b,
- const BinaryOperation binary_op)
- {
- sycl::vec<unsigned, 1> v0{a}, v1{b};
- auto v2 = v0.as<VecT>();
- auto v3 = v1.as<VecT>();
- auto v4 =
- detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
- v0 = v4.template as<sycl::vec<unsigned, 1>>();
- return v0;
- }
-
- static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
- memcpy_direction direction = automatic,
- sycl::queue &q = dpct::get_default_queue())
- {
- detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
- }
-
- static inline unsigned int select_device(unsigned int id)
- {
- dev_mgr::instance().select_device(id);
- return id;
- }
-
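- // XOR-based shuffle inside a logical sub-group, akin to CUDA's
- // __shfl_xor_sync, built on sycl::select_from_group.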
- template <typename T>
- T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
- unsigned int logical_sub_group_size = 32)
- {
- unsigned int id = g.get_local_linear_id();
- unsigned int start_index =
- id / logical_sub_group_size * logical_sub_group_size;
- unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
- return sycl::select_from_group(g, x,
- target_offset < logical_sub_group_size
- ? start_index + target_offset
- : id);
- }
-
- template <typename T>
- sycl::vec<int, 4> extract_and_sign_or_zero_extend4(T val)
- {
- return sycl::vec<T, 1>(val)
- .template as<sycl::vec<std::conditional_t<std::is_signed_v<T>, int8_t, uint8_t>, 4>>()
- .template convert<int>();
- }
-
- template <typename T1, typename T2>
- using dot_product_acc_t =
- std::conditional_t<std::is_unsigned_v<T1> && std::is_unsigned_v<T2>,
- uint32_t, int32_t>;
-
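- // Four-way byte dot product with 32-bit accumulate, akin to CUDA's
- // __dp4a; the accumulator is unsigned only if both inputs are unsigned.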
- template <typename T1, typename T2, typename T3>
- inline auto dp4a(T1 a, T2 b, T3 c)
- {
- dot_product_acc_t<T1, T2> res = c;
- auto va = extract_and_sign_or_zero_extend4(a);
- auto vb = extract_and_sign_or_zero_extend4(b);
- res += va[0] * vb[0];
- res += va[1] * vb[1];
- res += va[2] * vb[2];
- res += va[3] * vb[3];
- return res;
- }
-
- struct sub_sat
- {
- template <typename T>
- auto operator()(const T x, const T y) const
- {
- return sycl::sub_sat(x, y);
- }
- };
-
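- // Lane-wise minimum: reinterpret both scalars as the vector type VecT,
- // apply sycl::min per element, then pack the result back into a scalar.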
- template <typename VecT, typename T>
- inline T vectorized_min(T a, T b)
- {
- sycl::vec<T, 1> v0{a}, v1{b};
- auto v2 = v0.template as<VecT>();
- auto v3 = v1.template as<VecT>();
- auto v4 = sycl::min(v2, v3);
- v0 = v4.template as<sycl::vec<T, 1>>();
- return v0;
- }
-
- inline float pow(const float a, const int b) { return sycl::pown(a, b); }
- inline double pow(const double a, const int b) { return sycl::pown(a, b); }
- inline float pow(const float a, const float b) { return sycl::pow(a, b); }
- inline double pow(const double a, const double b) { return sycl::pow(a, b); }
- template