Merge branch 'master' into tokenizer-bpe-fixes

commit b8929d5f06

86 changed files with 31,812 additions and 27,109 deletions
.editorconfig
@@ -26,3 +26,6 @@ indent_size = 2

[examples/llama.swiftui/llama.swiftui.xcodeproj/*]
indent_style = tab

[examples/cvector-generator/*.txt]
insert_final_newline = unset
12 .github/pull_request_template.md vendored
@@ -1,5 +1,7 @@
- Self Reported Review Complexity:
- [ ] Review Complexity : Low
- [ ] Review Complexity : Medium
- [ ] Review Complexity : High
- [ ] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md)

- [x] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md)
- Self-reported review complexity:
- [ ] Low
- [ ] Medium
- [ ] High
CMakeLists.txt
@@ -119,6 +119,7 @@ option(LLAMA_HIP_UMA "llama: use HIP unified memory arch
option(LLAMA_VULKAN                "llama: use Vulkan"                                OFF)
option(LLAMA_VULKAN_CHECK_RESULTS  "llama: run Vulkan op checks"                      OFF)
option(LLAMA_VULKAN_DEBUG          "llama: enable Vulkan debug output"                OFF)
option(LLAMA_VULKAN_MEMORY_DEBUG   "llama: enable Vulkan memory debug output"         OFF)
option(LLAMA_VULKAN_VALIDATE       "llama: enable Vulkan validation"                  OFF)
option(LLAMA_VULKAN_RUN_TESTS      "llama: run Vulkan tests"                          OFF)
option(LLAMA_METAL                 "llama: use Metal"                                 ${LLAMA_METAL_DEFAULT})
@@ -534,6 +535,10 @@ if (LLAMA_VULKAN)
        add_compile_definitions(GGML_VULKAN_DEBUG)
    endif()

    if (LLAMA_VULKAN_MEMORY_DEBUG)
        add_compile_definitions(GGML_VULKAN_MEMORY_DEBUG)
    endif()

    if (LLAMA_VULKAN_VALIDATE)
        add_compile_definitions(GGML_VULKAN_VALIDATE)
    endif()
@@ -684,7 +689,8 @@ if (LLAMA_SYCL)
    endif()

    set(GGML_HEADERS_SYCL ggml-sycl.h)
    set(GGML_SOURCES_SYCL ggml-sycl.cpp)
    file(GLOB   GGML_SOURCES_SYCL "ggml-sycl/*.cpp")
    list(APPEND GGML_SOURCES_SYCL "ggml-sycl.cpp")

    if (WIN32)
        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl sycl7 OpenCL mkl_sycl_blas_dll.lib mkl_intel_ilp64_dll.lib mkl_sequential_dll.lib mkl_core_dll.lib)
9 Makefile
@@ -38,6 +38,7 @@ BUILD_TARGETS = \
	llama-tokenize \
	llama-train-text-from-scratch \
	llama-vdot \
	llama-cvector-generator \
	tests/test-c.o

# Binaries only useful for tests
@@ -607,6 +608,10 @@ ifdef LLAMA_VULKAN_DEBUG
	MK_CPPFLAGS += -DGGML_VULKAN_DEBUG
endif

ifdef LLAMA_VULKAN_MEMORY_DEBUG
	MK_CPPFLAGS += -DGGML_VULKAN_MEMORY_DEBUG
endif

ifdef LLAMA_VULKAN_VALIDATE
	MK_CPPFLAGS += -DGGML_VULKAN_VALIDATE
endif
@@ -922,6 +927,10 @@ llama-eval-callback: examples/eval-callback/eval-callback.cpp ggml.o llama.o $(C
	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

llama-train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
README-sycl.md
@@ -1,6 +1,7 @@
# llama.cpp for SYCL

- [Background](#background)
- [Recommended Release](#recommended-release)
- [News](#news)
- [OS](#os)
- [Hardware](#hardware)
@@ -31,8 +32,23 @@ When targeting **Intel CPU**, it is recommended to use llama.cpp for [Intel oneM

It has a design similar to other llama.cpp BLAS-based paths such as *OpenBLAS, cuBLAS, etc.*. In the initial work, oneAPI's [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) open-source migration tool (commercial release: [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) was used for this purpose.

## Recommended Release

Because there is no online CI for it, the SYCL backend can occasionally be broken by PRs.

The following release has been verified to work well:

|Commit ID|Tag|Release|Verified Platform|
|-|-|-|-|
|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1|

## News

- 2024.5
  - Performance is increased: 34 -> 37 tokens/s of llama-2-7b.Q4_0 on Arc770.
  - Arch Linux is verified successfully.

- 2024.4
  - Support data types: GGML_TYPE_IQ4_NL, GGML_TYPE_IQ4_XS, GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ3_S, GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M.
25 README.md
@@ -195,6 +195,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT)
- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal)
- [pythops/tenere](https://github.com/pythops/tenere) (AGPL)
- [RAGNA Desktop](https://ragna.app/) (proprietary)
- [RecurseChat](https://recurse.chat/) (proprietary)
- [semperai/amica](https://github.com/semperai/amica)
- [withcatai/catai](https://github.com/withcatai/catai)
@@ -386,6 +387,30 @@ brew install llama.cpp
```
The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668

### Nix

On Mac and Linux, the Nix package manager can be used via

```
nix profile install nixpkgs#llama-cpp
```

for flake-enabled installs, or

```
nix-env --file '<nixpkgs>' --install --attr llama-cpp
```

for non-flake-enabled installs.

This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).

#### Flox

On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via

```
flox install llama-cpp
```

Flox follows the nixpkgs build of llama.cpp.

### Metal Build

On macOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
common/common.cpp
@@ -1576,6 +1576,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
            return true;
        }
        params.out_file = argv[i];
        params.cvector_outfile = argv[i];
        return true;
    }
    if (arg == "-ofreq" || arg == "--output-frequency") {
@@ -1610,6 +1611,55 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
        params.i_chunk = std::stoi(argv[i]);
        return true;
    }
    // cvector params
    if (arg == "--completions-file") {
        if (++i >= argc) {
            invalid_param = true;
            return true;
        }
        params.cvector_completions_file = argv[i];
        return true;
    }
    if (arg == "--positive-file") {
        if (++i >= argc) {
            invalid_param = true;
            return true;
        }
        params.cvector_positive_file = argv[i];
        return true;
    }
    if (arg == "--negative-file") {
        if (++i >= argc) {
            invalid_param = true;
            return true;
        }
        params.cvector_negative_file = argv[i];
        return true;
    }
    if (arg == "--completions") {
        if (++i >= argc) {
            invalid_param = true;
            return true;
        }
        params.n_completions = std::stoi(argv[i]);
        return true;
    }
    if (arg == "--pca-batch") {
        if (++i >= argc) {
            invalid_param = true;
            return true;
        }
        params.n_pca_batch = std::stoi(argv[i]);
        return true;
    }
    if (arg == "--pca-iter") {
        if (++i >= argc) {
            invalid_param = true;
            return true;
        }
        params.n_pca_iterations = std::stoi(argv[i]);
        return true;
    }
#ifndef LOG_DISABLE_LOGS
    // Parse args for logging parameters
    if (log_param_single_parse(argv[i])) {
@@ -1931,6 +1981,16 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
    options.push_back({ "logging", "       --log-append", "Don't truncate the old log file." });
#endif // LOG_DISABLE_LOGS

    options.push_back({ "cvector" });
    options.push_back({ "cvector", "-o, --output FNAME", "output file (default: '%s')", params.cvector_outfile.c_str() });
    options.push_back({ "cvector", "       --positive-file FNAME", "positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str() });
    options.push_back({ "cvector", "       --negative-file FNAME", "negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str() });
    options.push_back({ "cvector", "       --completions-file FNAME",
                                   "completions file (default: '%s')", params.cvector_completions_file.c_str() });
    options.push_back({ "cvector", "       --completions N", "number of lines of completions file to use (default: %d)", params.n_completions });
    options.push_back({ "cvector", "       --pca-batch N", "batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch });
    options.push_back({ "cvector", "       --pca-iter N", "number of iterations used for PCA (default: %d)", params.n_pca_iterations });

    printf("usage: %s [options]\n", argv[0]);

    for (const auto & o : options) {
common/common.h
@@ -232,6 +232,15 @@ struct gpt_params {

    bool process_output = false; // collect data for the output tensor
    bool compute_ppl    = true;  // whether to compute perplexity

    // cvector-generator params
    int n_completions = 64;
    int n_pca_batch = 20;
    int n_pca_iterations = 1000;
    std::string cvector_outfile          = "control_vector.gguf";
    std::string cvector_completions_file = "examples/cvector-generator/completions.txt";
    std::string cvector_positive_file    = "examples/cvector-generator/positive.txt";
    std::string cvector_negative_file    = "examples/cvector-generator/negative.txt";
};

void gpt_params_handle_model_default(gpt_params & params);
examples/CMakeLists.txt
@@ -12,6 +12,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})

if (EMSCRIPTEN)
else()
    add_subdirectory(cvector-generator)
    add_subdirectory(baby-llama)
    add_subdirectory(batched-bench)
    add_subdirectory(batched)
5 examples/cvector-generator/CMakeLists.txt Normal file
@@ -0,0 +1,5 @@
set(TARGET llama-cvector-generator)
add_executable(${TARGET} cvector-generator.cpp pca.hpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
34 examples/cvector-generator/README.md Normal file
@@ -0,0 +1,34 @@
# cvector-generator

This example demonstrates how to generate a control vector using GGUF models.

Related PRs:
- [Add support for control vectors](https://github.com/ggerganov/llama.cpp/pull/5970)
- (Issue) [Generate control vector using llama.cpp](https://github.com/ggerganov/llama.cpp/issues/6880)
- [Add cvector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514)

## Examples

```sh
# CPU only
./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf

# With GPU
./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99

# With advanced options
./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100

# To see the help message
./cvector-generator -h
# Then, have a look at the "cvector" section
```

## Tips and tricks

If you have multiple lines per prompt, you can escape the newline character (change it to `\n`). For example:

```
<|im_start|>system\nAct like a person who is extremely happy.<|im_end|>
<|im_start|>system\nYou are in a very good mood today<|im_end|>
```
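A note on usage, as a reading aid for the example above: once generated, `control_vector.gguf` is meant to be consumed by the control-vector support added in PR #5970 linked in the README. Assuming the CLI flags introduced by that PR, applying it would look like `llama-cli -m <model>.gguf --control-vector control_vector.gguf`, or `--control-vector-scaled control_vector.gguf 0.8` to set an explicit strength.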
582 examples/cvector-generator/completions.txt Normal file
@@ -0,0 +1,582 @@

That game
I can see
Hmm, this
I can relate to
Who is
I understand the
Ugh,
What the hell was
Hey, did anyone
Although
Thank you for choosing
What are you
Oh w
How dare you open
It was my pleasure
I'm hon
I appreciate that you
Are you k
Whoever left this
It's always
Ew,
Hey, I l
Hello? Is someone
I understand that
That poem
Aww, poor
Hey, it
Alright, who
I didn't
Well, life
The document
Oh no, this
I'm concerned
Hello, this is
This art
Hmm, this drink
Hi there!
It seems
Is
Good
I can't
Ex
Who are
I can see that
Wow,
Today is a
Hey friend
Sometimes friends
Oh, this old
The weather outside
This place is sur
I appreciate your input
Thank you for the
Look at
I'm disappoint
To my
How dare you
That's an
This piece of art
Eww
This park is
This is incredible
Oh no, someone
Exc
Well, it'
I warned
Hey, I understand
Hey, I saw
How dare you go
What the he
Hey
It's
Hello? Hello?
It
Oh no!
This is the perfect
Good morning,
Oh no, there
It's so
Yeah
Uh,
Hello everyone
Who turned off
The weather
Who'
Hey, this
Wait,
Eww, gross
Excuse
It seems like you
Thank you so
What happened?
Oh my g
I am deeply sad
I war
Okay, let'
Hey, that
That was a beautiful
Oh no! That
What happened
Hey there
The artist'
What?!
Hey, it'
I am disappoint
It seems like
Oh no! The
This park is a
If you
Yes! I did
It sounds
What
Who is it
Hmm, that
That's strange
Yeah, that was
That's interesting
This park
What the hell
Who is that
I feel like my
Oh well
What the hell is
Hello? Hello
To my dearest
Bless you!\"
Thank you for
Oh, looks like
Can you please
This place is
Eww, what
Bless you
Is everything
Hey, I just
Whoever left these
Well, that'
I feel
Hey, do you
It's sad
Oh no, it
Hey, that'
Oh my god,
Thank you,
Hello little one,
I apolog
Hey team, I
How dare you read
Who is this and
Whoever left
Hi there! W
A
If you have
I was
U
Bless
Well, this
Oh, I'
It's a
Eww,
Is everything okay?
Oh, I
Hello, can you
Al
That was a great
What are
I understand that not
Oh no, not
Who is it?\"
Hey, can we
Whoever is taking
I would love to
Hey, I noticed
Hey, could
I understand that there
Hello?
D
Oh man, I
Thank you so much
Oh no, my
Dear [Name
Uh
I remember
Hey, who
Well, it
Are you
I understand that it
Hey, is
I would
Who is this
Excuse me
Alright
I am thrilled
Sometimes friends have
Who the
It's interesting
I would love
E
Hello? Is anyone
Well, this is
This place
Well,
I warned you
Hey, watch where
Oh my
That'
Sometimes friends have different
I understand that everyone
What?
What do these notes
I can relate
I'm not
I understand
To my dear
Guys
Well
Hey, I appreciate
Wow, what
Dear
That melody
Who the hell
Today is
Hello little
Wow, look
That's great
Love is never wrong
I'm having
Whoa, did
Ugh
Can you please provide
I miss you,
I feel uncom
I know
Ugh, this
Hey, watch
Oh great, a
I didn
Okay
That game of char
Oh
I appreciate
Who's there
I am so
Oh great, someone
Hey, could you
I remember wondering
Wait, what?
What do
Hello? Can
Hey there,
That game of
This is incred
Oh my gosh
Oh great, f
I appreciate your
It sounds like
What the heck
Okay, I understand
Ew
I understand that this
Uh, hi
Hi everyone!
What the hell?
Thank you for your
Oh no, the
Wow, I
Who turned
Dear [
Whoever
This is a
Whoa, he
What in the world
Although the physical
Hello, who is
That's amaz
Hey, I know
Okay, that
Hi everyone
Hey, is everything
I understand your fr
Oh no, poor
Oh, look
Good morning
Ew, gross
Oh no, did
Look at the family
Hey team
Yes!
Hey, can I
Okay, that'
It's great
Love is
Hey, what
Good morning, world
Who is it?
That poem really reson
I
That's
I understand the task
Gu
Hello? Who'
This postcard is
Whoa,
Oh, that
I understand that I
Whoever is
Hello? Who is
I'm really
Wow, this
Can
This artwork really
This is a shame
I miss you too
Who are you?
Today is a difficult
Hey, just
Are you okay
I am
Hi,
Wow, that
Hey there! Can
Okay, stay
Oh great, just
Yeah,
Hello? Can you
Oh, looks
Thank you for sharing
I'm glad
Hey, is that
Hmm
It was my
It sounds like you
Wow, your
I was promised certain
That was such a
Thank
Excuse you
That was
Hey team,
I feel un
It was
What'
Hey friend, I
How
Saying goodbye
That
It's heart
How dare
Oh,
Hello, may
What's this
Thank you for recogn
Aww, that
Oh, I remember
Hmm, that'
I miss
I know this
Wait
Is everything okay
Who is that person
Wow, you
Oh great
I'm sad
Wow, the
I am very disappoint
Who turned off the
I understand that things
I'm very
Hi
That's very
Okay, I
Oh no,
Wow, there
What's wrong
I apologize for
Hey, I
Can I help you
Oh, I didn
Alright,
Oh wow,
Oh my goodness
I know this event
What in the
Saying
Yeah, that
Guys, I
Hey, this v
This post
Are
Hey, can
Hello? Is
I can only imagine
Oh, that sounds
Hey, is anyone
I am disappointed
Hello,
Hey everyone, I
That was such
It's okay
The artist
Whoa
I understand that mistakes
Can I help
Who
Hi everyone! I
Hey, can you
Wow, how
Today
Oh no, I
Oh well, I
Well, that
This is the
Yes! I finally
Hey there little
Hello everyone!
Love is never
Look at the
This postcard
Oh great,
Can I
Hmm, this is
I understand your
Oh, look at
B
I'm so
Whoa, this
W
Oh, this
Sometimes
This piece of
What the
That was a
Hey, do
Oh no
Whoa, what
I feel like I
The documentary
Hello
Hello little one
I understand that my
Eww, that
Wow, an
Yes! Finally,
Although the physical location
Whoever is watching
That movie
I remember wondering about
Hey there, little
Who's
Hello, who
Hello everyone! Thank
Hello, can
That's too
Hey, just wanted
Hey there, I
Saying good
Hey there!
Who is there?
Oh my good
I am very
Oh no, what
Wow, thank
I was promised
Hi, is
Hey, I'
Guys, the
Oh no, that
Who is there
Hello, this
That movie really touched
If you have something
The documentary was
I'm starting
Are you kidd
That movie really
Hey everyone,
Thank you for considering
I didn'
Yes! I
Can you
Oh my god
Hey, whoever
That melody really
Thank you, little
Hello, may I
Look
Wow, we
It looks
What do these
Oh wow
I apologize
What are you all
It's such
It's clear
Hey, I was
Hey friend,
I can only
The weather outside is
Eww, this
I miss you
Wow
Aww,
Hi, is there
This artwork
Okay,
Oh well,
This
I'
Say
Hey there little gu
Hmm,
Whoa, who
I am thr
Oh man
Okay, stay calm
I'm happy
Oh, this cur
Oh man,
I'm sorry
Hello? Who
What?! That
This piece
Hey everyone
That's so
Are you okay?
What happened? Where
Hi there
The
Who the hell entered
I can
Guys,
What's
What in
It's important
I'm
I'm coming
It'
Yes! Finally
Wait, what
Wow, reading
I'm surprised
Hey, did
Hey,
Okay, let
I understand that you
Who the hell threw
Eww, who
Thank you for thinking
Who is this?\"
I am deeply
Thank you for including
Oh no, an
It looks like you
Aww
I'm confused
Wow, it
That poem really
Yes
Hey there, is
Hey, what'
Thank you for remember
To
This is
Thank you for making
I can'
That mel
Wow, they
I feel like
Although the
Who are you
Love
If
What the hell are
I am so sad
Oh, I found
Thank you
It looks like
Well, life is
I appreciate that
The artist's
Whoa, that
It's never
499 examples/cvector-generator/cvector-generator.cpp Normal file
@@ -0,0 +1,499 @@
#include "common.h"
#include "llama.h"
#include "ggml.h"
#include "pca.hpp"

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#include <cstdio>
#include <cstring>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <climits>


//////////////////////////////////////////////////
// utils

template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }

    return ret;
}

static void print_usage(int argc, char ** argv, const gpt_params & params) {
    gpt_params_print_usage(argc, argv, params);

    printf("\nexample usage:\n");
    printf("\n    CPU only:   %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf\n", argv[0]);
    printf("\n    with GPU:   %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99\n", argv[0]);
    printf("\n    advanced:   %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100\n", argv[0]);
    printf("\n");
}

//////////////////////////////////////////////////


// cb_eval is reused for each pair of positive/negative prompts
struct callback_data {
    ggml_context * ctx_ggml = nullptr;   // holds v_pos, v_neg, v_diff_filtered

    int n_layers = 0;
    int n_tokens = 0;
    bool is_eval_pos = true;

    // each element of the vector corresponds to one layer
    std::vector<struct ggml_tensor *> v_pos; // vector of matrices of size [n_embd, n_tokens]
    std::vector<struct ggml_tensor *> v_neg; // vector of matrices of size [n_embd, n_tokens]
    std::vector<struct ggml_tensor *> v_diff_filtered; // vector of matrices of size [n_embd, n_nonzero_rows]. NOTE: n_nonzero_rows may be different for each layer

    // save a tensor into either v_pos or v_neg (decided by is_eval_pos)
    void save_tensor_for_layer(struct ggml_tensor * t) {
        GGML_ASSERT(t->type == GGML_TYPE_F32);

        if (ctx_ggml == nullptr) {
            // alloc a new ctx_ggml if needed
            struct ggml_init_params params_ggml = {
                /*.mem_size   =*/ ggml_tensor_overhead() * n_layers * 3u,
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };
            ctx_ggml = ggml_init(params_ggml);
        }

        // copy tensor data
        auto n_bytes = ggml_nbytes(t);
        struct ggml_tensor * t_layer = ggml_new_tensor_2d(ctx_ggml, t->type, t->ne[0], t->ne[1]);
        t_layer->data = malloc(n_bytes); // TODO @ngxson : get rid of this malloc somehow
        ggml_backend_tensor_get(t, t_layer->data, 0, n_bytes);
        ggml_set_name(t_layer, ggml_get_name(t));
        //print_debug_tensor(t_layer);

        if (is_eval_pos) {
            v_pos.push_back(t_layer);
        } else {
            v_neg.push_back(t_layer);
        }
    }

    // calculate diff (v_pos - v_neg) and place the result back into v_pos
    // all zero rows in the diff tensor will also be removed
    // NOTE: the final layer is ignored. we only have (n_layers - 1) to process
    std::vector<struct ggml_tensor *> calc_diff() {
        for (size_t il = 0; il < v_pos.size(); il++) {
            float * a = (float *) v_pos[il]->data;
            float * b = (float *) v_neg[il]->data;
            size_t n_elem = ggml_nelements(v_pos[il]);
            for (size_t j = 0; j < n_elem; j++) {
                a[j] -= b[j];
            }
            //print_debug_tensor(v_pos[il]);
            auto diff_filtered = filter_nonzero_rows(v_pos[il]);
            v_diff_filtered.push_back(diff_filtered);
        }
        return v_diff_filtered; // for convenience, we return the result std::vector
    }

    // delete zero rows from a given 2D tensor
    struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
        //printf("filter_nonzero_rows\n");
        auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
            // check if the given row contains only zero elements
            int n_cols = t->ne[0]; // hint: should be equal to n_embd
            for (int col = 0; col < n_cols; ++col) {
                if (ggml_get_f32_nd(t, col, row, 0, 0) > eps) {
                    return false;
                }
            }
            return true;
        };
        std::vector<int> rows_to_copy; // the indices of the non-zero rows (to be copied into diff_filtered)
        for (int i_row = 0; i_row < a->ne[1]; i_row++) {
            if (!is_row_all_zeros(a, i_row, 1e-6)) {
                rows_to_copy.push_back(i_row);
            }
        }

        // get "n_nonzero_rows" for the output "diff_filtered"
        int n_nonzero_rows = rows_to_copy.size();
        //printf("n_nonzero_rows: %d\n", n_nonzero_rows);
        int n_embd = a->ne[0];
        GGML_ASSERT(n_nonzero_rows > 0);

        // diff_filtered: [n_embd, n_nonzero_rows]
        struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
            ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
        ggml_format_name(diff_filtered, "diff_filtered_%s", a->name);
        diff_filtered->data = malloc(ggml_nbytes(diff_filtered));

        // copy non-zero rows
        for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
            int src_row = rows_to_copy[dest_row];
            for (int i = 0; i < n_embd; i++) {
                float src_elem = ggml_get_f32_nd(a, i, src_row, 0, 0);
                ggml_set_f32_nd(diff_filtered, i, dest_row, 0, 0, src_elem);
            }
        }

        //print_debug_tensor(diff_filtered);

        return diff_filtered;
    }

    // we don't implement a destructor, because we want to reuse callback_data. we just want to free the tensors
    void reset() {
        for (auto ptr : v_pos) free(ptr->data);
        for (auto ptr : v_neg) free(ptr->data);
        for (auto ptr : v_diff_filtered) free(ptr->data);
        v_pos.clear();
        v_neg.clear();
        v_diff_filtered.clear();
        if (ctx_ggml) {
            ggml_free(ctx_ggml);
        }
        ctx_ggml = nullptr;
    }
};

/**
 * process_ctx is used to store the ggml context for pre/post-processing the diff vectors
 * in short, input => v_diff and output => v_final
 */
struct train_context {
    ggml_context * ctx_ggml;
    int n_embd;
    int n_layers;

    /* pairs of prompts to be used for generating the final vector */
    std::vector<std::string> positive_entries;
    std::vector<std::string> negative_entries;

    // each element of the vector corresponds to one layer
    // NOTE: the last layer is discarded. therefore, we will have (n_layers - 1) elements here
    // NOTE (2): v_diff is transposed from v_diff_tmp
    std::vector<struct ggml_tensor *> v_diff;  // vector of matrices of size [m, n_embd] where m ~ n_tokens * n_completions (v_diff contains no zero-rows)
    std::vector<struct ggml_tensor *> v_final; // vector of vectors of size [n_embd] to be written to file

    // to easily re-alloc when concatenating v_diff, we temporarily store v_diff in a vector instead of a tensor
    // v_diff_tmp will get converted into v_diff later on
    std::vector<std::vector<uint8_t>> v_diff_tmp;

    train_context(int n_embd_, int n_layers_) {
        n_embd = n_embd_;
        n_layers = n_layers_;
        struct ggml_init_params params_ggml = {
            /*.mem_size   =*/ ggml_tensor_overhead() * (n_layers - 1) * 2u,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx_ggml = ggml_init(params_ggml);
        for (int il = 0; il < n_layers - 1; il++) {
            std::vector<uint8_t> empty;
            v_diff_tmp.push_back(empty);
            auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
            t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
            v_final.push_back(t);
        }
    }

    // add new rows into the existing tensor in v_diff_tmp
    void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
        GGML_ASSERT((int) diff_filtered.size() == n_layers - 1);
        for (int il = 0; il < n_layers - 1; il++) {
            auto t = diff_filtered[il];
            auto & diff_tmp = v_diff_tmp[il];
            size_t curr_size = diff_tmp.size();
            diff_tmp.resize(curr_size + ggml_nbytes(t));
            memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
        }
    }

    // build the v_diff tensors from v_diff_tmp (v_diff needs to be transposed)
    // TODO @ngxson : maybe add an option NOT to transpose v_diff; will be useful for the "mean" method
    void build_v_diff() {
        printf("build_v_diff\n");
        for (int il = 0; il < n_layers - 1; il++) {
            auto & diff_tmp = v_diff_tmp[il];
            int n_elem = diff_tmp.size() / sizeof(float);
            GGML_ASSERT(n_elem % n_embd == 0);
            int n_rows = n_elem / n_embd;
            struct ggml_tensor * diff = ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd);
            ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
            // copy data & transpose
            diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
            float * arr = (float *) diff_tmp.data();
            for (int ir = 0; ir < n_rows; ++ir) {
                for (int ic = 0; ic < n_embd; ++ic) {
                    float f = arr[ir*n_embd + ic];
                    ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
                }
            }
            v_diff.push_back(diff);
            print_debug_tensor(diff);
            // free memory of diff_tmp
            diff_tmp.resize(0);
        }
    }

    ~train_context() {
        for (auto ptr : v_final) free(ptr->data);
        for (auto ptr : v_diff) free(ptr->data);
        // no need to free v_diff_tmp, since we didn't use malloc for it
        ggml_free(ctx_ggml);
    }
};

struct tokenized_prompt {
    std::vector<llama_token> tokens_pos;
    std::vector<llama_token> tokens_neg;
    size_t max_seq_len;

    tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
        const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
        tokens_pos = ::llama_tokenize(ctx, pos, add_bos);
        tokens_neg = ::llama_tokenize(ctx, neg, add_bos);
        max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
        padding_seq(ctx, tokens_pos, max_seq_len);
        padding_seq(ctx, tokens_neg, max_seq_len);
    }

    void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
        // TODO: customize padding token
        std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
        llama_token pad_tok = pad_tokens.back();
        while (tokens.size() < len) {
            tokens.push_back(pad_tok);
        }
    }
};

//////////////////////////////////////////////////

template <typename T>
static std::string to_string(const T & val) {
    std::stringstream ss;
    ss << val;
    return ss.str();
}

static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
    std::vector<std::string> output;
    std::ifstream file(path);
    if (!file.is_open()) {
        fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
        exit(1);
    }
    std::string line;
    while (std::getline(file, line)) {
        bool is_skip = skip_empty_lines && line.empty();
        if (!is_skip) {
            string_process_escapes(line);
            output.push_back(line);
        }
    }
    file.close();
    return output;
}

//////////////////////////////////////////////////

static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
    auto * cb_data = (callback_data *) user_data;
    static const char * l_out_name = "l_out";
    const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;

    if (ask) {
        return is_l_out;
    }

    if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
        return true;
    }

    // save the tensor to the current context
    cb_data->save_tensor_for_layer(t);
    return true;
}

static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
    llama_kv_cache_clear(ctx);
    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
        fprintf(stderr, "%s : failed to eval\n", __func__);
        return false;
    }
    return true;
}

static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
    struct gguf_context * ctx = gguf_init_empty();

    const std::string arch = "controlvector";
    gguf_set_val_str(ctx, "general.architecture", arch.c_str());
    gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
    gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());

    for (size_t i = 0; i < v_ctrl.size(); ++i) {
        gguf_add_tensor(ctx, v_ctrl[i]);
        print_debug_tensor(v_ctrl[i]);
        printf("Added tensor: %s\n", v_ctrl[i]->name);
    }

    printf("%s: writing file...\n", __func__);
    gguf_write_to_file(ctx, fname.c_str(), false);
    printf("%s: wrote file '%s'\n", __func__, fname.c_str());
    gguf_free(ctx);
}

/**
 * Load the prompt files and the completion file.
 * Then format each pair of prompt + completion to make an entry.
 */
static int prepare_entries(gpt_params & params, train_context & ctx_train) {
    // load prompts
    std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
    std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
    if (positive_prompts.size() != negative_prompts.size()) {
        fprintf(stderr, "number of positive and negative prompts must be equal\n");
        return 1;
    }
    if (positive_prompts.empty()) {
        fprintf(stderr, "must provide at least one prompt pair\n");
        return 1;
    }

    // create templated prompts
    std::vector<std::string> completions = ctrlvec_load_prompt_file(params.cvector_completions_file, false);
    auto format_template = [](std::string persona, std::string suffix) {
        // entry in positive/negative.txt must already be formatted, i.e. "[INST] Act as if you're extremely happy. [/INST]"
        return persona + " " + suffix;
    };
    for (size_t i = 0; i < positive_prompts.size(); ++i) {
        for (int j = 0; j < std::min((int) completions.size(), params.n_completions); ++j) {
            // TODO replicate the truncations done by the python implementation
            ctx_train.positive_entries.push_back(format_template(positive_prompts[i], completions[j]));
            ctx_train.negative_entries.push_back(format_template(negative_prompts[i], completions[j]));
        }
    }
    return 0;
}

int main(int argc, char ** argv) {
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params)) {
        print_usage(argc, argv, params);
        return 1;
    }

    if (params.n_pca_iterations % params.n_pca_batch != 0) {
        fprintf(stderr, "PCA iterations must be a multiple of PCA batch size\n");
        return 1;
    }


    callback_data cb_data;

    // pass the callback to the backend scheduler
    // it will be executed for each node during the graph computation
    params.cb_eval = cb_eval;
    params.cb_eval_user_data = &cb_data;
    params.warmup = false;

    print_build_info();
    llama_backend_init();
    llama_numa_init(params.numa);

    // load the model to get hparams
    llama_model * model;
    llama_context * ctx;
    std::tie(model, ctx) = llama_init_from_gpt_params(params);

    // int n_ctx = llama_n_ctx(ctx);
    int n_layers = llama_n_layer(model);
    int n_embd = llama_n_embd(model);
    // get the model hint param (a.k.a. the model arch name)
    char model_hint[128];
    llama_model_meta_val_str(model, "general.architecture", model_hint, 128);

    // init train_context
    train_context ctx_train(n_embd, n_layers);

    // load and prepare entries for training
    prepare_entries(params, ctx_train);

    // we have to pretokenize everything because otherwise we don't know how much overhead to allocate for ctx_diffs_wrapped
    std::vector<tokenized_prompt> tokenized_prompts;
    size_t n_total_tokens = 0;
    for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
        tokenized_prompt t(ctx, ctx_train.positive_entries[i], ctx_train.negative_entries[i]);
        n_total_tokens += 2 * t.max_seq_len;
        tokenized_prompts.push_back(std::move(t));
    }

    std::cout << "n_total_tokens: " << n_total_tokens << std::endl;

    for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
        bool success = false;
        tokenized_prompt t = tokenized_prompts[i];
        cb_data.n_layers = n_layers;
        cb_data.n_tokens = t.max_seq_len;

        printf("Evaluating prompt[%d/%d]: \"%s\" - \"%s\" (%d tokens)\n",
            (int) i+1, (int) ctx_train.positive_entries.size(),
            tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
            tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
            (int) t.max_seq_len);

        cb_data.is_eval_pos = true;
        success = get_hidden_layers(ctx, t.tokens_pos);
        if (!success) break;

        cb_data.is_eval_pos = false;
        success = get_hidden_layers(ctx, t.tokens_neg);
        if (!success) break;

        // calculate diff and remove all zero rows
        auto v_diff_filtered = cb_data.calc_diff();

        // save & concat the filtered v_diff to ctx_train
        ctx_train.concat_diff_tmp(v_diff_filtered);

        // reset for the next iteration
        cb_data.reset();
    }

    // done with the model, we can now free it to regain some memory
    printf("Done evaluating prompts, unloading model...\n");
    llama_free(ctx);
    llama_free_model(model);

    // prepare ctx_train for PCA
    ctx_train.build_v_diff();

    // run PCA
    PCA::pca_params pca_params;
    pca_params.n_threads    = params.n_threads;
    pca_params.n_batch      = params.n_pca_batch;
    pca_params.n_iterations = params.n_pca_iterations;
    PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);

    // write the output vectors to gguf
    export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint);

    llama_backend_free();

    return 0;
}
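As a reading aid for cvector-generator.cpp above: the program implements a hidden-state difference method. For each positive/negative prompt pair, `cb_eval` captures the per-layer `l_out` activations $H_l^{+}$ and $H_l^{-}$; `calc_diff` then forms, per layer $l$ (up to ggml's row/column conventions),

$$D_l = H_l^{+} - H_l^{-}, \qquad v_l = \operatorname*{arg\,max}_{\lVert v \rVert = 1} \lVert D_l^{\top} v \rVert^{2},$$

with all-zero rows dropped and the remainder concatenated over prompt pairs. The control vector written to `direction.<l>` is thus the first principal component of the stacked difference matrix $D_l$, computed by `PCA::run_pca` from pca.hpp below.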
1 examples/cvector-generator/negative.txt Normal file
@@ -0,0 +1 @@
[INST] Act like a person who is extremely sad. [/INST]
322 examples/cvector-generator/pca.hpp Normal file
@@ -0,0 +1,322 @@
#include "common.h"
#include "llama.h"
#include "ggml.h"

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#include <cmath>
#include <cstdio>
#include <ctime>
#include <random>
#include <string>
#include <tuple>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>

#define DEBUG_POS 5

static void print_debug_tensor(struct ggml_tensor * t, bool with_data = true) {
    printf("%s: %s (%s): [%d, %d]\n", __func__, t->name, ggml_type_name(t->type), (int) t->ne[0], (int) t->ne[1]);
    if (!with_data) return;
    printf("%s: %s[0] = [", __func__, t->name);
    for (int i = 0; i <= DEBUG_POS; i++) {
        printf(" %f,", ggml_get_f32_nd(t, i, 0, 0, 0));
    }
    printf(" ... ]\n");
}

namespace PCA {

// input params for PCA computations
struct pca_params {
    int n_threads = 1;
    int n_batch = 20; // number of iterations to do in one batch; the larger the batch, the more memory is used
    int n_iterations = 1000;
    float tolerance = 1e-7;

    // for debugging
    int i_layer = 0;
    int n_layers = 0;
};

// result from each iteration
struct pca_result {
    struct ggml_tensor * calculated_square = NULL;
    std::vector<struct ggml_tensor *> eigenvectors;
    std::vector<float> distances;
};

struct pca_model {
    ggml_backend_t backend = NULL;
    ggml_backend_buffer_t buffer;
    struct ggml_context * ctx;      // context to compute the graph on the target device
    struct ggml_context * ctx_host; // host context to store results

    // tensors on the target device
    struct ggml_tensor * dev_input;
    struct ggml_tensor * dev_square;
    struct ggml_tensor * dev_eigenvector;

    pca_model(struct ggml_tensor * t_input) {
#ifdef GGML_USE_CUDA
        fprintf(stderr, "%s: using CUDA backend\n", __func__);
        backend = ggml_backend_cuda_init(0); // init device 0
        if (!backend) {
            fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
        }
#endif

        // TODO: enable Metal support when support for GGML_OP_SQRT is added
        // #ifdef GGML_USE_METAL
        //     fprintf(stderr, "%s: using Metal backend\n", __func__);
        //     backend = ggml_backend_metal_init();
        //     if (!backend) {
        //         fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
        //     }
        // #endif

        // if there is no GPU backend, fall back to the CPU backend
        if (!backend) {
            backend = ggml_backend_cpu_init();
        }

        const int num_tensors = 4;
        struct ggml_init_params params {
            /*.mem_size   =*/ ggml_tensor_overhead() * num_tensors,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx = ggml_init(params);

        auto n_samples = t_input->ne[0];
        auto n_embd    = t_input->ne[1];

        dev_input       = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_samples, n_embd);
        dev_square      = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
        dev_eigenvector = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

        ggml_set_name(dev_input, "dev_input");
        ggml_set_name(dev_square, "dev_square");
        ggml_set_name(dev_eigenvector, "dev_eigenvector");
        buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);
        ggml_backend_tensor_set(dev_input, t_input->data, 0, ggml_nbytes(t_input));

        // initialize the eigenvector to a random normalized vector
        {
            std::vector<float> random_vec(ggml_nelements(dev_eigenvector), 0.0);
            std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
            std::uniform_real_distribution<float> distribution(0.0, 1.0);
            float sum_sqr = 0.0; // for normalizing random_vec
            for (size_t i = 0; i < random_vec.size(); ++i) {
                float f = distribution(generator);
                sum_sqr += f * f;
                random_vec[i] = f;
            }
            // normalize it
            float random_vec_norm = std::sqrt(sum_sqr);
            for (size_t i = 0; i < random_vec.size(); ++i) {
                random_vec[i] /= random_vec_norm;
            }
            ggml_backend_tensor_set(dev_eigenvector, random_vec.data(), 0, ggml_nbytes(dev_eigenvector));
        }
    }

    ~pca_model() {
        ggml_free(ctx);
        ggml_backend_buffer_free(buffer);
        ggml_backend_free(backend);
    }
};

static struct ggml_cgraph * build_graph_piter(
        const struct pca_params & params,
        const pca_model & model,
        bool calc_square = false) {
    GGML_ASSERT(params.n_batch > 0);
    // TODO: buf_size must be able to scale with params.n_batch
    static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
    static std::vector<uint8_t> buf(buf_size);

    struct ggml_init_params params0 = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ buf.data(),
        /*.no_alloc   =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
    };
    // create a temporary context to build the graph
    struct ggml_context * ctx0 = ggml_init(params0);
    struct ggml_cgraph * gf = ggml_new_graph(ctx0);

    // turn v_diff_original into a square matrix if needed
    struct ggml_tensor * tmp_square;
    if (calc_square) {
        tmp_square = ggml_mul_mat(ctx0, model.dev_input, model.dev_input);
        ggml_set_name(tmp_square, "tmp_square");
    }

    struct ggml_tensor * b_tensor;
    struct ggml_tensor * distance;
    struct ggml_tensor * old_eigen    = model.dev_eigenvector;
    struct ggml_tensor * input_square = calc_square ? tmp_square : model.dev_square;

    for (int i = 0; i < params.n_batch; ++i) {
        // b_tensor = square * eigenvector^T
        b_tensor = ggml_mul_mat(ctx0, input_square, old_eigen);
        ggml_set_name(b_tensor, "b_tensor");

        // normalize
        b_tensor = ggml_div_inplace(ctx0,
            b_tensor,
            ggml_sqrt_inplace(ctx0, ggml_sum_rows(ctx0, ggml_sqr(ctx0, b_tensor)))
        );
        ggml_format_name(b_tensor, "b_tensor_norm_%d", i);

        // calculate distance(new eigenvector - old eigenvector)
        // we don't use ggml_sub because it may not be implemented on the GPU backend
        struct ggml_tensor * new_sub_old = ggml_add(ctx0, old_eigen, ggml_scale(ctx0, b_tensor, -1));
        distance = ggml_sqrt_inplace(ctx0,
            ggml_sum_rows(ctx0, ggml_sqr_inplace(ctx0, new_sub_old)));
        ggml_format_name(distance, "distance_%d", i);

        old_eigen = b_tensor;

        // build operation nodes
        ggml_build_forward_expand(gf, distance);
    }

    // delete the temporary context used to build the graph
    ggml_free(ctx0);
    return gf;
}

static ggml_status compute_piter(
        const struct pca_params & params,
        const pca_model & model,
        struct ggml_cgraph * gf,
        ggml_gallocr_t allocr,
        struct pca_result & result) {
    // allocate tensors
    ggml_gallocr_alloc_graph(allocr, gf);

    if (ggml_backend_is_cpu(model.backend)) {
        ggml_backend_cpu_set_n_threads(model.backend, params.n_threads);
    }

    // TODO: enable GPU support when support for GGML_OP_SQRT is added
    //#ifdef GGML_USE_METAL
    //    if (ggml_backend_is_metal(model.backend)) {
    //        ggml_backend_metal_set_n_cb(model.backend, params.n_threads);
    //    }
    //#endif

    ggml_status res = ggml_backend_graph_compute(model.backend, gf);
    if (res == GGML_STATUS_SUCCESS) {
        auto extract_i = [](std::string prefix, std::string str) -> int {
            int i = -1;
            if (str.rfind(prefix, 0) == 0) {
                sscanf(str.c_str(), (prefix + "%d").c_str(), &i);
            }
            return i;
        };
        result.calculated_square = NULL;
        result.eigenvectors.clear();
        result.distances.clear();
        result.eigenvectors.resize(params.n_batch);
        result.distances.resize(params.n_batch);
        // get output nodes
        for (int i = 0; i < gf->n_nodes; ++i) {
            auto node = gf->nodes[i];
            int iter = -1;
            // find b_tensor (without copying data from the device)
            if ((iter = extract_i("b_tensor_norm_", node->name)) > -1) {
                result.eigenvectors[iter] = node;
            }
            // find distances, then copy data from the device
            if ((iter = extract_i("distance_", node->name)) > -1) {
                float d;
                ggml_backend_tensor_get(node, &d, 0, sizeof(float));
                result.distances[iter] = d;
                // std::cout << node->name << " = " << d << "\n";
            }
            // find tmp_square if it exists (without copying data from the device)
            if (std::string(node->name) == "tmp_square") {
                result.calculated_square = node;
            }
        }
    }
    return res;
}

static void power_iteration(
        const struct pca_params & params,
        struct ggml_tensor * input, // shape of input: [n_samples, n_embd]
        struct ggml_tensor * output) {
    //printf("in power iteration\n");
    struct pca_model model(input);

    ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
    struct pca_result result;
    struct ggml_tensor * last_eigenvector = NULL;

    int n_iters = params.n_iterations / params.n_batch; // the larger the batch, the fewer the iterations
    for (int iter = 0; iter < n_iters; ++iter) {
        bool calc_square = (iter == 0); // we only need to calculate the square for the first iteration
        struct ggml_cgraph * gf = build_graph_piter(params, model, calc_square);
        // ggml_graph_dump_dot(gf, nullptr, "/tmp/_cgraph.dot");
        compute_piter(params, model, gf, allocr, result);

        for (size_t k = 0; k < result.distances.size(); ++k) {
            last_eigenvector = result.eigenvectors[k];
            if (result.distances[k] < params.tolerance) {
                break; // done
            }
        }

        if (calc_square) {
            // copy and store the square matrix if needed
            GGML_ASSERT(result.calculated_square != NULL);
            ggml_backend_tensor_copy(result.calculated_square, model.dev_square);
        }

        {
            // copy the last eigenvector and store it as the input for the next iteration
            GGML_ASSERT(last_eigenvector != NULL);
            ggml_backend_tensor_copy(last_eigenvector, model.dev_eigenvector);
        }

        printf("%s: layer %d/%d, iteration: %d / total: %d (batch = %d) ...\n",
            __func__, params.i_layer+1, params.n_layers, iter, n_iters, params.n_batch);
    }

    // get the output tensor
    GGML_ASSERT(last_eigenvector);
    ggml_backend_tensor_get(last_eigenvector, output->data, 0, ggml_nbytes(last_eigenvector));
    //print_debug_tensor(output);
    ggml_gallocr_free(allocr);
}

static void run_pca(
        struct pca_params & params,
        const std::vector<struct ggml_tensor *> & v_input,  // shape of v_input[0]: [n_samples, n_embd]
        const std::vector<struct ggml_tensor *> & v_output) {
    printf("%s: Running PCA...\n", __func__);
    for (size_t il = 0; il < v_input.size(); ++il) {

        // prepare the output vector
        struct ggml_tensor * ctrl_out = v_output[il];
        ggml_format_name(ctrl_out, "direction.%ld", il+1);

        // run power_iteration
        params.i_layer = il;
        params.n_layers = v_input.size();
        power_iteration(params, v_input[il], ctrl_out);
        printf("%s: Done layer %d / %d\n", __func__, (int) il+1, (int) v_input.size());
    }
}

} // namespace PCA
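The eigenvector search in pca.hpp is plain power iteration, batched `n_batch` steps per compute graph. With $A = D_l D_l^{\top}$ (the `dev_square` matrix, computed once on the first batch via `ggml_mul_mat`), each step of `build_graph_piter` evaluates

$$b_{k+1} = \frac{A\, b_k}{\lVert A\, b_k \rVert}, \qquad d_k = \lVert b_{k+1} - b_k \rVert,$$

and `power_iteration` stops refining once some $d_k$ drops below `tolerance` (1e-7 by default); the converged $b$ is the dominant eigenvector of $A$, i.e. the first principal component.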
1 examples/cvector-generator/positive.txt Normal file
@@ -0,0 +1 @@
[INST] Act like a person who is extremely happy. [/INST]
6 flake.lock generated
@@ -20,11 +20,11 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1717786204,
        "narHash": "sha256-4q0s6m0GUcN7q+Y2DqD27iLvbcd1G50T2lv08kKxkSI=",
        "lastModified": 1718318537,
        "narHash": "sha256-4Zu0RYRcAY/VWuu6awwq4opuiD//ahpc2aFHg2CWqFY=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "051f920625ab5aabe37c920346e3e69d7d34400e",
        "rev": "e9ee548d90ff586a6471b4ae80ae9cfcbceb3420",
        "type": "github"
      },
      "original": {
ggml-backend.c
@@ -1172,7 +1172,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
    // check if a backend with higher prio wants to offload the op
    if (src_backend_id == sched->n_backends - 1) {
        for (int b = 0; b < src_backend_id; b++) {
            if (ggml_backend_offload_op(sched->backends[b], tensor)) {
            if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                SET_CAUSE(tensor, "1.off");
                return b;
            }
@ -2267,6 +2267,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
        case GGML_OP_SQR:
            ggml_cuda_op_sqr(ctx, dst);
            break;
        case GGML_OP_SQRT:
            ggml_cuda_op_sqrt(ctx, dst);
            break;
        case GGML_OP_CLAMP:
            ggml_cuda_op_clamp(ctx, dst);
            break;
@ -2830,6 +2833,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
        case GGML_OP_RMS_NORM:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_CLAMP:
        case GGML_OP_CONT:
        case GGML_OP_DIAG_MASK_INF:
@ -117,7 +117,7 @@ static __global__ void mul_mat_vec_q(
        tmp[j][i] = warp_reduce_sum(tmp[j][i]);
    }

    if (threadIdx.x < rows_per_cuda_block) {
    if (threadIdx.x < rows_per_cuda_block && (rows_per_cuda_block == 1 || row0 + threadIdx.x < nrows_dst)) {
        dst[j*nrows_dst + row0 + threadIdx.x] = tmp[j][threadIdx.x];
    }
}
@ -92,6 +92,15 @@ static __global__ void sqr_f32(const float * x, float * dst, const int k) {
    dst[i] = x[i] * x[i];
}

static __global__ void sqrt_f32(const float * x, float * dst, const int k) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;

    if (i >= k) {
        return;
    }
    dst[i] = sqrtf(x[i]);
}

static void gelu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
    const int num_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE;
    gelu_f32<<<num_blocks, CUDA_GELU_BLOCK_SIZE, 0, stream>>>(x, dst, k);

@ -142,6 +151,11 @@ static void sqr_f32_cuda(const float * x, float * dst, const int k, cudaStream_t
    sqr_f32<<<num_blocks, CUDA_SQR_BLOCK_SIZE, 0, stream>>>(x, dst, k);
}

static void sqrt_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
    const int num_blocks = (k + CUDA_SQRT_BLOCK_SIZE - 1) / CUDA_SQRT_BLOCK_SIZE;
    sqrt_f32<<<num_blocks, CUDA_SQRT_BLOCK_SIZE, 0, stream>>>(x, dst, k);
}

void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const float * src0_d = (const float *)src0->data;

@ -284,3 +298,17 @@ void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {

    sqr_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
}

void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const float * src0_d = (const float *)src0->data;
    float * dst_d = (float *)dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(ggml_is_contiguous(src0));

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    sqrt_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
}
@ -8,6 +8,7 @@
#define CUDA_HARDSIGMOID_BLOCK_SIZE 256
#define CUDA_HARDSWISH_BLOCK_SIZE 256
#define CUDA_SQR_BLOCK_SIZE 256
#define CUDA_SQRT_BLOCK_SIZE 256

void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

@ -28,3 +29,5 @@ void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
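Note on the launcher pattern added above: sqrt_f32_cuda sizes the grid with ceil division so that every element gets exactly one thread, and the kernel's `if (i >= k) return;` guard masks off the overhang in the final block. The sizing rule, isolated as a small C++ sketch:

#include <cassert>

// Ceil-division grid sizing, as used by sqrt_f32_cuda: enough blocks so
// that num_blocks * block_size >= k, with the kernel discarding the rest.
static int num_blocks_for(int k, int block_size) {
    assert(block_size > 0);
    return (k + block_size - 1) / block_size;
}
// e.g. num_blocks_for(1000, 256) == 4 -> 1024 threads launched, 24 masked off.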
647
ggml-quants.c
File diff suppressed because it is too large
11
ggml-rpc.cpp

@ -73,9 +73,13 @@ struct rpc_tensor {
    uint64_t view_offs;
    uint64_t data;
    char name[GGML_MAX_NAME];

    char padding[4];
};
#pragma pack(pop)

static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of 8");

// RPC commands
enum rpc_cmd {
    ALLOC_BUFFER = 0,

@ -599,9 +603,8 @@ static void serialize_graph(const ggml_cgraph * cgraph, std::vector<uint8_t> & o
    int output_size = sizeof(uint32_t) + n_nodes * sizeof(uint64_t) + sizeof(uint32_t) + n_tensors * sizeof(rpc_tensor);
    output.resize(output_size, 0);
    memcpy(output.data(), &n_nodes, sizeof(n_nodes));
    uint64_t * out_nodes = (uint64_t *)(output.data() + sizeof(n_nodes));
    for (uint32_t i = 0; i < n_nodes; i++) {
        out_nodes[i] = reinterpret_cast<uint64_t>(cgraph->nodes[i]);
        memcpy(output.data() + sizeof(n_nodes) + i * sizeof(uint64_t), &cgraph->nodes[i], sizeof(uint64_t));
    }
    uint32_t * out_ntensors = (uint32_t *)(output.data() + sizeof(n_nodes) + n_nodes * sizeof(uint64_t));
    *out_ntensors = n_tensors;

@ -1036,7 +1039,9 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, std::vector<u
    }
    std::unordered_map<uint64_t, ggml_tensor*> tensor_map;
    for (uint32_t i = 0; i < n_nodes; i++) {
        graph->nodes[i] = create_node(nodes[i], ctx, tensor_ptrs, tensor_map);
        int64_t id;
        memcpy(&id, &nodes[i], sizeof(id));
        graph->nodes[i] = create_node(id, ctx, tensor_ptrs, tensor_map);
    }
    ggml_status status = ggml_backend_graph_compute(backend, graph);
    // output serialization format: | status (1 byte) |
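Note: the serialize_graph / graph_compute changes above replace pointer-typed stores and loads on the packed wire buffer with memcpy, which avoids unaligned access and strict-aliasing undefined behavior. A minimal sketch of the pattern (plain C++, hypothetical payload):

#include <cstdint>
#include <cstring>
#include <vector>

// Write a u64 into a byte buffer at an arbitrary (possibly unaligned) offset.
static void put_u64(std::vector<uint8_t> & buf, size_t off, uint64_t v) {
    memcpy(buf.data() + off, &v, sizeof(v)); // safe for any alignment
}

// Read it back the same way; casting buf.data()+off to uint64_t* instead
// would be undefined behavior when off is not 8-byte aligned.
static uint64_t get_u64(const std::vector<uint8_t> & buf, size_t off) {
    uint64_t v;
    memcpy(&v, buf.data() + off, sizeof(v));
    return v;
}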
5512
ggml-sycl.cpp
File diff suppressed because it is too large
11
ggml-sycl.h

@ -8,14 +8,12 @@

#include "ggml.h"
#include "ggml-backend.h"
#include "ggml-sycl/presets.hpp"

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_SYCL_MAX_DEVICES 48
#define GGML_SYCL_NAME "SYCL"

// backend API
GGML_API ggml_backend_t ggml_backend_sycl_init(int device);

@ -33,13 +31,6 @@ GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);

// TODO: these are temporary
// ref: https://github.com/ggerganov/llama.cpp/pull/6022#issuecomment-1992615670
GGML_API GGML_CALL int ggml_backend_sycl_get_device_id(int device_index);
GGML_API GGML_CALL void ggml_backend_sycl_set_single_device_mode(int main_gpu_id);
GGML_API GGML_CALL void ggml_backend_sycl_set_mul_device_mode();

// SYCL doesn't support registering host memory, keep here for reference
// GGML_API GGML_CALL bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
18
ggml-sycl/backend.hpp
Normal file

@ -0,0 +1,18 @@
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#ifndef GGML_SYCL_BACKEND_HPP
#define GGML_SYCL_BACKEND_HPP

#include "common.hpp"

#endif // GGML_SYCL_BACKEND_HPP
53
ggml-sycl/common.cpp
Normal file

@ -0,0 +1,53 @@
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#include "common.hpp"

int get_current_device_id() {
  return dpct::dev_mgr::instance().current_device_id();
}

void* ggml_sycl_host_malloc(size_t size) try {
  if (getenv("GGML_SYCL_NO_PINNED") != nullptr) {
    return nullptr;
  }

  void* ptr = nullptr;
  // allow using dpct::get_in_order_queue() for host malloc
  dpct::err0 err = CHECK_TRY_ERROR(
      ptr = (void*)sycl::malloc_host(size, dpct::get_in_order_queue()));

  if (err != 0) {
    // clear the error
    fprintf(
        stderr,
        "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
        size / 1024.0 / 1024.0,
        "syclGetErrorString is not supported");
    return nullptr;
  }

  return ptr;
} catch (sycl::exception const& exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}

void ggml_sycl_host_free(void* ptr) try {
  // allow using dpct::get_in_order_queue() for host free
  SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(ptr, dpct::get_in_order_queue())));
} catch (sycl::exception const& exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
298
ggml-sycl/common.hpp
Normal file

@ -0,0 +1,298 @@
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#ifndef GGML_SYCL_COMMON_HPP
#define GGML_SYCL_COMMON_HPP

#include <fstream>
#include <iostream>

#include "dpct/helper.hpp"
#include "presets.hpp"

#define GGML_COMMON_DECL_SYCL
#define GGML_COMMON_IMPL_SYCL
#include "ggml-common.h"

void* ggml_sycl_host_malloc(size_t size);
void ggml_sycl_host_free(void* ptr);

static int g_ggml_sycl_debug = 0;
#define GGML_SYCL_DEBUG(...) \
  do { \
    if (g_ggml_sycl_debug) \
      fprintf(stderr, __VA_ARGS__); \
  } while (0)

#define CHECK_TRY_ERROR(expr) \
  [&]() { \
    try { \
      expr; \
      return dpct::success; \
    } catch (std::exception const& e) { \
      std::cerr << e.what() << "\nException caught at file:" << __FILE__ \
                << ", line:" << __LINE__ << ", func:" << __func__ \
                << std::endl; \
      return dpct::default_error; \
    } \
  }()

// #define DEBUG_SYCL_MALLOC

static int g_work_group_size = 0;
// typedef sycl::half ggml_fp16_t;

#define __SYCL_ARCH__ DPCT_COMPATIBILITY_TEMP
#define VER_4VEC 610 // todo for hardware optimization.
#define VER_GEN9 700 // todo for hardware optimization.
#define VER_GEN12 1000000 // todo for hardware optimization.
#define VER_GEN13 (VER_GEN12 + 1030) // todo for hardware optimization.

#define GGML_SYCL_MAX_NODES 8192 // TODO: adapt to hardware

// define for XMX in Intel GPU
// TODO: currently it is not really used for XMX.
#if !defined(GGML_SYCL_FORCE_MMQ)
  #define SYCL_USE_XMX
#endif

// max batch size to use MMQ kernels when tensor cores are available
#define MMQ_MAX_BATCH_SIZE 32

#if defined(_MSC_VER)
  #pragma warning(disable : 4244 4267) // possible loss of data
#endif

// dmmv = dequantize_mul_mat_vec
#ifndef GGML_SYCL_DMMV_X
  #define GGML_SYCL_DMMV_X 32
#endif
#ifndef GGML_SYCL_MMV_Y
  #define GGML_SYCL_MMV_Y 1
#endif

typedef sycl::queue *queue_ptr;

enum ggml_sycl_backend_gpu_mode {
  SYCL_UNSET_GPU_MODE = -1,
  SYCL_SINGLE_GPU_MODE = 0,
  SYCL_MUL_GPU_MODE
};

static_assert(sizeof(sycl::half) == sizeof(ggml_fp16_t), "wrong fp16 size");

static void crash() {
  int* ptr = NULL;
  *ptr = 0;
}

[[noreturn]] static void ggml_sycl_error(
    const char* stmt,
    const char* func,
    const char* file,
    const int line,
    const char* msg) {
  fprintf(stderr, "SYCL error: %s: %s\n", stmt, msg);
  fprintf(stderr, "  in function %s at %s:%d\n", func, file, line);
  GGML_ASSERT(!"SYCL error");
}

#define SYCL_CHECK(err) \
  do { \
    auto err_ = (err); \
    if (err_ != 0) \
      ggml_sycl_error( \
          #err, \
          __func__, \
          __FILE__, \
          __LINE__, \
          "Meet error in this line code!"); \
  } while (0)

#if DPCT_COMPAT_RT_VERSION >= 11100
#define GGML_SYCL_ASSUME(x) __builtin_assume(x)
#else
#define GGML_SYCL_ASSUME(x)
#endif // DPCT_COMPAT_RT_VERSION >= 11100

#ifdef GGML_SYCL_F16
typedef sycl::half dfloat; // dequantize float
typedef sycl::half2 dfloat2;
#else
typedef float dfloat; // dequantize float
typedef sycl::float2 dfloat2;
#endif // GGML_SYCL_F16

#define MMVQ_MAX_BATCH_SIZE 8

static const int8_t kvalues_iq4nl[16]={-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};

static int g_all_sycl_device_count = -1;
static bool g_ggml_backend_sycl_buffer_type_initialized = false;

static ggml_sycl_backend_gpu_mode g_ggml_sycl_backend_gpu_mode =
    SYCL_UNSET_GPU_MODE;

static void* g_scratch_buffer = nullptr;
static size_t g_scratch_size = 0; // disabled by default
static size_t g_scratch_offset = 0;

[[noreturn]] static inline void bad_arch(const sycl::stream& stream_ct1) {
  stream_ct1 << "ERROR: ggml-sycl was compiled without support for the "
                "current GPU architecture.\n";
  // __trap();
  std::exit(1);

  (void)bad_arch; // suppress unused function warning
}

int get_current_device_id();

inline dpct::err0 ggml_sycl_set_device(const int device) try {

  int current_device_id;
  SYCL_CHECK(CHECK_TRY_ERROR(current_device_id = get_current_device_id()));

  // GGML_SYCL_DEBUG("ggml_sycl_set_device device_id=%d,
  // current_device_id=%d\n", device, current_device);
  if (device == current_device_id) {
    return 0;
  }

  return CHECK_TRY_ERROR(dpct::select_device(device));
} catch (sycl::exception const& exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  crash();
  std::exit(1);
}

//////////////////////

struct ggml_sycl_device_info {
  int device_count;

  struct sycl_device_info {
    int cc; // compute capability
    // int nsm; // number of streaming multiprocessors
    // size_t smpb; // max. shared memory per block
    bool vmm; // virtual memory support
    size_t total_vram;
  };

  sycl_device_info devices[GGML_SYCL_MAX_DEVICES] = {};

  std::array<float, GGML_SYCL_MAX_DEVICES> default_tensor_split = {};
};

const ggml_sycl_device_info & ggml_sycl_info();

struct ggml_sycl_pool {
  virtual ~ggml_sycl_pool() = default;

  virtual void * alloc(size_t size, size_t * actual_size) = 0;
  virtual void free(void * ptr, size_t size) = 0;
};

template<typename T>
struct ggml_sycl_pool_alloc {
  ggml_sycl_pool * pool = nullptr;
  T * ptr = nullptr;
  size_t actual_size = 0;

  explicit ggml_sycl_pool_alloc(ggml_sycl_pool & pool) : pool(&pool) {
  }

  ggml_sycl_pool_alloc(ggml_sycl_pool & pool, size_t size) : pool(&pool) {
    alloc(size);
  }

  ~ggml_sycl_pool_alloc() {
    if (ptr != nullptr) {
      pool->free(ptr, actual_size);
    }
  }

  // size is in number of elements
  T * alloc(size_t size) {
    GGML_ASSERT(pool != nullptr);
    GGML_ASSERT(ptr == nullptr);
    ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size);
    return ptr;
  }

  T * alloc(ggml_sycl_pool & pool, size_t size) {
    this->pool = &pool;
    return alloc(size);
  }

  T * get() {
    return ptr;
  }

  ggml_sycl_pool_alloc() = default;
  ggml_sycl_pool_alloc(const ggml_sycl_pool_alloc &) = delete;
  ggml_sycl_pool_alloc(ggml_sycl_pool_alloc &&) = delete;
  ggml_sycl_pool_alloc& operator=(const ggml_sycl_pool_alloc &) = delete;
  ggml_sycl_pool_alloc& operator=(ggml_sycl_pool_alloc &&) = delete;
};

// backend interface

struct ggml_tensor_extra_gpu {
  void* data_device[GGML_SYCL_MAX_DEVICES]; // 1 pointer for each device for split
                                            // tensors
  dpct::event_ptr events[GGML_SYCL_MAX_DEVICES]
                        [GGML_SYCL_MAX_STREAMS]; // events for synchronizing multiple GPUs
};

struct ggml_backend_sycl_context {
  int device;
  std::string name;

  queue_ptr qptrs[GGML_SYCL_MAX_DEVICES][GGML_SYCL_MAX_STREAMS] = { { nullptr } };

  explicit ggml_backend_sycl_context(int device) :
    device(device),
    name(GGML_SYCL_NAME + std::to_string(device)) {
  }

  queue_ptr stream(int device, int stream) {
    if (qptrs[device][stream] == nullptr) {
      qptrs[device][stream] = &(dpct::get_current_device().default_queue());
    }
    return qptrs[device][stream];
  }

  queue_ptr stream() {
    return stream(device, 0);
  }

  // pool
  std::unique_ptr<ggml_sycl_pool> pools[GGML_SYCL_MAX_DEVICES];

  static std::unique_ptr<ggml_sycl_pool> new_pool_for_device(queue_ptr qptr, int device);

  ggml_sycl_pool & pool(int device) {
    if (pools[device] == nullptr) {
      pools[device] = new_pool_for_device(stream(device,0), device);
    }
    return *pools[device];
  }

  ggml_sycl_pool & pool() {
    return pool(device);
  }
};


#endif // GGML_SYCL_COMMON_HPP
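Note: ggml_sycl_pool_alloc above is a scope-bound (RAII) wrapper: the allocation is returned to the pool when the object goes out of scope, even if an exception is thrown. A sketch of the intended usage against the ggml_sycl_pool interface, with a deliberately trivial pool shown only for illustration (real pools recycle device buffers):

#include <cstdlib>

// Trivial pool forwarding to malloc/free, just to demonstrate the interface.
struct trivial_pool : ggml_sycl_pool {
    void * alloc(size_t size, size_t * actual_size) override {
        *actual_size = size;
        return std::malloc(size);
    }
    void free(void * ptr, size_t /*size*/) override {
        std::free(ptr);
    }
};

static void pool_alloc_example() {
    trivial_pool pool;
    {
        ggml_sycl_pool_alloc<float> tmp(pool, 1024); // 1024 floats from the pool
        tmp.get()[0] = 1.0f;
    } // tmp's destructor returns the buffer to the pool here
}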
2980
ggml-sycl/dpct/helper.hpp
Normal file
File diff suppressed because it is too large
69
ggml-sycl/presets.hpp
Normal file

@ -0,0 +1,69 @@
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#ifndef GGML_SYCL_PRESETS_HPP
#define GGML_SYCL_PRESETS_HPP

#define GGML_SYCL_MAX_STREAMS 8
#define GGML_SYCL_MAX_BUFFERS 256
#define GGML_SYCL_MAX_DEVICES 48
#define GGML_SYCL_NAME "SYCL"

// FIXME: 1024 from cuda
#define GROUP_SIZE 1024
#define WARP_SIZE 32
#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses

#define SYCL_GELU_BLOCK_SIZE 256
#define SYCL_SILU_BLOCK_SIZE 256
#define SYCL_TANH_BLOCK_SIZE 256
#define SYCL_RELU_BLOCK_SIZE 256
#define SYCL_HARDSIGMOID_BLOCK_SIZE 256
#define SYCL_HARDSWISH_BLOCK_SIZE 256
#define SYCL_SQR_BLOCK_SIZE 256
#define SYCL_CPY_BLOCK_SIZE 32
#define SYCL_SCALE_BLOCK_SIZE 256
#define SYCL_CLAMP_BLOCK_SIZE 256
#define SYCL_ROPE_BLOCK_SIZE 256
#define SYCL_ALIBI_BLOCK_SIZE 32
#define SYCL_DIAG_MASK_INF_BLOCK_SIZE 32
#define SYCL_QUANTIZE_BLOCK_SIZE 256
#define SYCL_DEQUANTIZE_BLOCK_SIZE 256
#define SYCL_GET_ROWS_BLOCK_SIZE 256
#define SYCL_UPSCALE_BLOCK_SIZE 256
#define SYCL_CONCAT_BLOCK_SIZE 256
#define SYCL_PAD_BLOCK_SIZE 256
#define SYCL_ACC_BLOCK_SIZE 256
#define SYCL_IM2COL_BLOCK_SIZE 256
#define SYCL_POOL2D_BLOCK_SIZE 256

// dmmv = dequantize_mul_mat_vec
#ifndef GGML_SYCL_DMMV_X
#define GGML_SYCL_DMMV_X 32
#endif
#ifndef GGML_SYCL_MMV_Y
#define GGML_SYCL_MMV_Y 1
#endif

#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 2
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif

#ifndef GGML_SYCL_PEER_MAX_BATCH_SIZE
#define GGML_SYCL_PEER_MAX_BATCH_SIZE 128
#endif // GGML_SYCL_PEER_MAX_BATCH_SIZE

#define MUL_MAT_SRC1_COL_STRIDE 128

#endif // GGML_SYCL_PRESETS_HPP
40774
ggml-vulkan-shaders.hpp
File diff suppressed because it is too large
612
ggml-vulkan.cpp
File diff suppressed because it is too large
@ -14,7 +14,7 @@ import numpy as np
if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists():
    sys.path.insert(0, str(Path(__file__).parent.parent))

from gguf import GGUFReader, GGUFValueType  # noqa: E402
from gguf import GGUFReader, GGUFValueType, ReaderTensor  # noqa: E402

logger = logging.getLogger("gguf-dump")


@ -101,25 +101,285 @@ def dump_metadata_json(reader: GGUFReader, args: argparse.Namespace) -> None:
    json.dump(result, sys.stdout)


def markdown_table_with_alignment_support(header_map: list[dict[str, str]], data: list[dict[str, Any]]):
    # JSON to Markdown table formatting: https://stackoverflow.com/a/72983854/2850957

    # Alignment Utility Function
    def strAlign(padding: int, alignMode: str | None, strVal: str):
        if alignMode == 'center':
            return strVal.center(padding)
        elif alignMode == 'right':
            return strVal.rjust(padding - 1) + ' '
        elif alignMode == 'left':
            return ' ' + strVal.ljust(padding - 1)
        else:  # default left
            return ' ' + strVal.ljust(padding - 1)

    def dashAlign(padding: int, alignMode: str | None):
        if alignMode == 'center':
            return ':' + '-' * (padding - 2) + ':'
        elif alignMode == 'right':
            return '-' * (padding - 1) + ':'
        elif alignMode == 'left':
            return ':' + '-' * (padding - 1)
        else:  # default left
            return '-' * (padding)

    # Calculate Padding For Each Column Based On Header and Data Length
    rowsPadding = {}
    for index, columnEntry in enumerate(header_map):
        padCount = max([len(str(v)) for d in data for k, v in d.items() if k == columnEntry['key_name']], default=0) + 2
        headerPadCount = len(columnEntry['header_name']) + 2
        rowsPadding[index] = headerPadCount if padCount <= headerPadCount else padCount

    # Render Markdown Header
    rows = []
    rows.append('|'.join(strAlign(rowsPadding[index], columnEntry.get('align'), str(columnEntry['header_name'])) for index, columnEntry in enumerate(header_map)))
    rows.append('|'.join(dashAlign(rowsPadding[index], columnEntry.get('align')) for index, columnEntry in enumerate(header_map)))

    # Render Tabular Data
    for item in data:
        rows.append('|'.join(strAlign(rowsPadding[index], columnEntry.get('align'), str(item[columnEntry['key_name']])) for index, columnEntry in enumerate(header_map)))

    # Convert Tabular String Rows Into String
    tableString = ""
    for row in rows:
        tableString += f'|{row}|\n'

    return tableString


def element_count_rounded_notation(count: int) -> str:
    if count > 1e15 :
        # Quadrillion
        scaled_amount = count * 1e-15
        scale_suffix = "Q"
    elif count > 1e12 :
        # Trillions
        scaled_amount = count * 1e-12
        scale_suffix = "T"
    elif count > 1e9 :
        # Billions
        scaled_amount = count * 1e-9
        scale_suffix = "B"
    elif count > 1e6 :
        # Millions
        scaled_amount = count * 1e-6
        scale_suffix = "M"
    elif count > 1e3 :
        # Thousands
        scaled_amount = count * 1e-3
        scale_suffix = "K"
    else:
        # Under Thousands
        scaled_amount = count
        scale_suffix = ""
    return f"{'~' if count > 1e3 else ''}{round(scaled_amount)}{scale_suffix}"


def translate_tensor_name(name):
    words = name.split(".")

    # Source: https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#standardized-tensor-names
    abbreviation_dictionary = {
        'token_embd': 'Token embedding',
        'pos_embd': 'Position embedding',
        'output_norm': 'Output normalization',
        'output': 'Output',
        'attn_norm': 'Attention normalization',
        'attn_norm_2': 'Attention normalization',
        'attn_qkv': 'Attention query-key-value',
        'attn_q': 'Attention query',
        'attn_k': 'Attention key',
        'attn_v': 'Attention value',
        'attn_output': 'Attention output',
        'ffn_norm': 'Feed-forward network normalization',
        'ffn_up': 'Feed-forward network "up"',
        'ffn_gate': 'Feed-forward network "gate"',
        'ffn_down': 'Feed-forward network "down"',
        'ffn_gate_inp': 'Expert-routing layer for the Feed-forward network in Mixture of Expert models',
        'ffn_gate_exp': 'Feed-forward network "gate" layer per expert in Mixture of Expert models',
        'ffn_down_exp': 'Feed-forward network "down" layer per expert in Mixture of Expert models',
        'ffn_up_exp': 'Feed-forward network "up" layer per expert in Mixture of Expert models',
        'ssm_in': 'State space model input projections',
        'ssm_conv1d': 'State space model rolling/shift',
        'ssm_x': 'State space model selective parametrization',
        'ssm_a': 'State space model state compression',
        'ssm_d': 'State space model skip connection',
        'ssm_dt': 'State space model time step',
        'ssm_out': 'State space model output projection',
        'blk': 'Block'
    }

    expanded_words = []
    for word in words:
        word_norm = word.strip().lower()
        if word_norm in abbreviation_dictionary:
            expanded_words.append(abbreviation_dictionary[word_norm].title())
        else:
            expanded_words.append(word.title())

    return ' '.join(expanded_words)


def dump_markdown_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
    host_endian, file_endian = get_file_host_endian(reader)
    markdown_content = ""
    markdown_content += f'# {args.model} - GGUF Internal File Dump\n\n'
    markdown_content += f'- Endian: {file_endian} endian\n'
    markdown_content += '\n'
    markdown_content += '## Key Value Metadata Store\n\n'
    markdown_content += f'There are {len(reader.fields)} key-value pairs in this file\n'
    markdown_content += '\n'

    kv_dump_table: list[dict[str, str | int]] = []
    for n, field in enumerate(reader.fields.values(), 1):
        if not field.types:
            pretty_type = 'N/A'
        elif field.types[0] == GGUFValueType.ARRAY:
            nest_count = len(field.types) - 1
            pretty_type = '[' * nest_count + str(field.types[-1].name) + ']' * nest_count
        else:
            pretty_type = str(field.types[-1].name)

        total_elements = len(field.data)
        value = ""
        if len(field.types) == 1:
            curr_type = field.types[0]
            if curr_type == GGUFValueType.STRING:
                value = repr(str(bytes(field.parts[-1]), encoding='utf-8')[:60])
            elif curr_type in reader.gguf_scalar_to_np:
                value = str(field.parts[-1][0])
        else:
            if field.types[0] == GGUFValueType.ARRAY:
                curr_type = field.types[1]
                if curr_type == GGUFValueType.STRING:
                    render_element = min(5, total_elements)
                    for element_pos in range(render_element):
                        value += repr(str(bytes(field.parts[-1 - element_pos]), encoding='utf-8')[:5]) + (", " if total_elements > 1 else "")
                elif curr_type in reader.gguf_scalar_to_np:
                    render_element = min(7, total_elements)
                    for element_pos in range(render_element):
                        value += str(field.parts[-1 - element_pos][0]) + (", " if total_elements > 1 else "")
                value = f'[ {value}{" ..." if total_elements > 1 else ""} ]'
        kv_dump_table.append({"n":n, "pretty_type":pretty_type, "total_elements":total_elements, "field_name":field.name, "value":value})

    kv_dump_table_header_map = [
        {'key_name':'n', 'header_name':'POS', 'align':'right'},
        {'key_name':'pretty_type', 'header_name':'TYPE', 'align':'left'},
        {'key_name':'total_elements', 'header_name':'Count', 'align':'right'},
        {'key_name':'field_name', 'header_name':'Key', 'align':'left'},
        {'key_name':'value', 'header_name':'Value', 'align':'left'},
    ]

    markdown_content += markdown_table_with_alignment_support(kv_dump_table_header_map, kv_dump_table)

    markdown_content += "\n"

    if not args.no_tensors:
        # Group tensors by their prefix and maintain order
        tensor_prefix_order: list[str] = []
        tensor_name_to_key: dict[str, int] = {}
        tensor_groups: dict[str, list[ReaderTensor]] = {}
        total_elements = sum(tensor.n_elements for tensor in reader.tensors)

        # Parsing Tensors Record
        for key, tensor in enumerate(reader.tensors):
            tensor_components = tensor.name.split('.')

            # Classify Tensor Group
            tensor_group_name = "base"
            if tensor_components[0] == 'blk':
                tensor_group_name = f"{tensor_components[0]}.{tensor_components[1]}"

            # Check if new Tensor Group
            if tensor_group_name not in tensor_groups:
                tensor_groups[tensor_group_name] = []
                tensor_prefix_order.append(tensor_group_name)

            # Record Tensor and Tensor Position
            tensor_groups[tensor_group_name].append(tensor)
            tensor_name_to_key[tensor.name] = key

        # Tensors Mapping Dump
        markdown_content += f'## Tensors Overview {element_count_rounded_notation(total_elements)} Elements\n\n'
        markdown_content += f'Total number of elements in all tensors: {total_elements} Elements\n'
        markdown_content += '\n'

        for group in tensor_prefix_order:
            tensors = tensor_groups[group]
            group_elements = sum(tensor.n_elements for tensor in tensors)
            markdown_content += f"- [{translate_tensor_name(group)} Tensor Group - {element_count_rounded_notation(group_elements)} Elements](#{group.replace('.', '_')})\n"

        markdown_content += "\n"

        for group in tensor_prefix_order:
            tensors = tensor_groups[group]
            group_elements = sum(tensor.n_elements for tensor in tensors)
            group_percentage = group_elements / total_elements * 100
            markdown_content += f"### <a name=\"{group.replace('.', '_')}\">{translate_tensor_name(group)} Tensor Group : {element_count_rounded_notation(group_elements)} Elements</a>\n\n"

            # Precalculate column sizing for visual consistency
            prettify_element_est_count_size: int = 1
            prettify_element_count_size: int = 1
            prettify_dimension_max_widths: dict[int, int] = {}
            for tensor in tensors:
                prettify_element_est_count_size = max(prettify_element_est_count_size, len(str(element_count_rounded_notation(tensor.n_elements))))
                prettify_element_count_size = max(prettify_element_count_size, len(str(tensor.n_elements)))
                for i, dimension_size in enumerate(list(tensor.shape) + [1] * (4 - len(tensor.shape))):
                    prettify_dimension_max_widths[i] = max(prettify_dimension_max_widths.get(i,1), len(str(dimension_size)))

            # Generate Tensor Layer Table Content
            tensor_dump_table: list[dict[str, str | int]] = []
            for tensor in tensors:
                human_friendly_name = translate_tensor_name(tensor.name.replace(".weight", ".(W)").replace(".bias", ".(B)"))
                pretty_dimension = ' x '.join(f'{str(d):>{prettify_dimension_max_widths[i]}}' for i, d in enumerate(list(tensor.shape) + [1] * (4 - len(tensor.shape))))
                element_count_est = f"({element_count_rounded_notation(tensor.n_elements):>{prettify_element_est_count_size}})"
                element_count_string = f"{element_count_est} {tensor.n_elements:>{prettify_element_count_size}}"
                type_name_string = f"{tensor.tensor_type.name}"
                tensor_dump_table.append({"t_id":tensor_name_to_key[tensor.name], "layer_name":tensor.name, "human_layer_name":human_friendly_name, "element_count":element_count_string, "pretty_dimension":pretty_dimension, "tensor_type":type_name_string})

            tensor_dump_table_header_map = [
                {'key_name':'t_id', 'header_name':'T_ID', 'align':'right'},
                {'key_name':'layer_name', 'header_name':'Tensor Layer Name', 'align':'left'},
                {'key_name':'human_layer_name', 'header_name':'Human Friendly Tensor Layer Name', 'align':'left'},
                {'key_name':'element_count', 'header_name':'Elements', 'align':'left'},
                {'key_name':'pretty_dimension', 'header_name':'Shape', 'align':'left'},
                {'key_name':'tensor_type', 'header_name':'Type', 'align':'left'},
            ]

            markdown_content += markdown_table_with_alignment_support(tensor_dump_table_header_map, tensor_dump_table)

            markdown_content += "\n"
            markdown_content += f"- Total elements in {group}: ({element_count_rounded_notation(group_elements):>4}) {group_elements}\n"
            markdown_content += f"- Percentage of total elements: {group_percentage:.2f}%\n"
            markdown_content += "\n\n"

    print(markdown_content)  # noqa: NP100


def main() -> None:
    parser = argparse.ArgumentParser(description="Dump GGUF file metadata")
    parser.add_argument("model", type=str, help="GGUF format model filename")
    parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata")
    parser.add_argument("--json", action="store_true", help="Produce JSON output")
    parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)")
    parser.add_argument("--markdown", action="store_true", help="Produce markdown output")
    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")

    args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    if not args.json:
    if not args.json and not args.markdown:
        logger.info(f'* Loading: {args.model}')

    reader = GGUFReader(args.model, 'r')

    if args.json:
        dump_metadata_json(reader, args)
    elif args.markdown:
        dump_markdown_metadata(reader, args)
    else:
        dump_metadata(reader, args)
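Note: element_count_rounded_notation above buckets a count into K/M/B/T/Q suffixes and prefixes '~' whenever rounding actually happened. The same logic in C++, a sketch mirroring the Python thresholds:

#include <cmath>
#include <string>

// Scale a count into K/M/B/T/Q notation, '~'-prefixed once rounded.
static std::string rounded_notation(long long count) {
    struct { double limit; double scale; const char * suffix; } tiers[] = {
        {1e15, 1e-15, "Q"}, {1e12, 1e-12, "T"}, {1e9, 1e-9, "B"},
        {1e6, 1e-6, "M"}, {1e3, 1e-3, "K"},
    };
    for (const auto & t : tiers) {
        if (count > t.limit) {
            return "~" + std::to_string((long long) std::llround(count * t.scale)) + t.suffix;
        }
    }
    return std::to_string(count);
}
// rounded_notation(8030261248) -> "~8B", matching the Python helper's output.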
224
llama.cpp

@ -1278,6 +1278,126 @@ struct no_init {
};

struct llama_file {

#if defined(_WIN32)
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    HANDLE fp_win32;
    size_t size;

private:
    std::string GetErrorMessageWin32(DWORD error_code) const {
        std::string ret;
        LPSTR lpMsgBuf = NULL;
        DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                                      NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL);
        if (!bufLen) {
            ret = format("Win32 error code: %lu", error_code);
        } else {
            ret = lpMsgBuf;
            LocalFree(lpMsgBuf);
        }

        return ret;
    }

public:

    llama_file(const char * fname, const char * mode) {
        fp = ggml_fopen(fname, mode);
        if (fp == NULL) {
            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
        }
        fp_win32 = (HANDLE) _get_osfhandle(_fileno(fp));
        seek(0, SEEK_END);
        size = tell();
        seek(0, SEEK_SET);
    }

    size_t tell() const {
        // SetFilePointerEx returns the current position when seeking relative 0 bytes
        LARGE_INTEGER li;
        li.QuadPart = 0;
        BOOL ret = SetFilePointerEx(fp_win32, li, &li, FILE_CURRENT);
        if (!ret) {
            throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
        }

        return li.QuadPart;
    }

    void seek(size_t offset, int whence) const {
        // no need to convert SEEK_* to FILE_*. The enums are the same.
        // Still, keep static asserts to avoid failures in the future.
        static_assert(SEEK_SET == FILE_BEGIN, "SEEK_SET != FILE_BEGIN");
        static_assert(SEEK_CUR == FILE_CURRENT, "SEEK_CUR != FILE_CURRENT");
        static_assert(SEEK_END == FILE_END, "SEEK_END != FILE_END");

        LARGE_INTEGER li;
        li.QuadPart = offset;
        BOOL ret = SetFilePointerEx(fp_win32, li, NULL, whence);
        if (!ret) {
            throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
        }
    }

    void read_raw(void * ptr, size_t len) const {
        // On Win32, ReadFile is significantly faster than fread, which is in turn significantly faster than
        // std::fstream. Thus use the Win32 API for file IO instead of the C/C++ library functions.

        // There are conditions under which ReadFile cannot read chunks >64MB.
        // Thus split the operation into smaller chunks if len exceeds this limit.
        size_t bytes_read = 0;
        while (bytes_read < len) {
            size_t chunk_size = std::min<size_t>(len - bytes_read, 64*1024*1024);
            DWORD chunk_read = 0;
            BOOL result = ReadFile(fp_win32, reinterpret_cast<char*>(ptr) + bytes_read, chunk_size, &chunk_read, NULL);
            if (!result) {
                throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
            }
            if (chunk_read < chunk_size || chunk_read == 0) {
                throw std::runtime_error("unexpectedly reached end of file");
            }

            bytes_read += chunk_read;
        }
    }

    uint32_t read_u32() const {
        uint32_t val;
        read_raw(&val, sizeof(val));
        return val;
    }

    void write_raw(const void * ptr, size_t len) const {
        // There are conditions under which WriteFile cannot write chunks >64MB.
        // Thus split the operation into smaller chunks if len exceeds this limit.
        size_t bytes_written = 0;
        while (bytes_written < len) {
            size_t chunk_size = std::min<size_t>(len - bytes_written, 64*1024*1024);
            DWORD chunk_written = 0;
            BOOL result = WriteFile(fp_win32, reinterpret_cast<char const*>(ptr) + bytes_written, chunk_size, &chunk_written, NULL);
            if (!result) {
                throw std::runtime_error(format("write error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
            }
            if (chunk_written < chunk_size || chunk_written == 0) {
                throw std::runtime_error("unexpectedly failed to write bytes");
            }

            bytes_written += chunk_written;
        }
    }

    void write_u32(std::uint32_t val) const {
        write_raw(&val, sizeof(val));
    }

    ~llama_file() {
        if (fp) {
            std::fclose(fp);
        }
    }
#else
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    size_t size;

@ -1298,7 +1418,10 @@ struct llama_file {
#else
        long ret = std::ftell(fp);
#endif
        GGML_ASSERT(ret != -1); // this really shouldn't fail
        if (ret == -1) {
            throw std::runtime_error(format("ftell error: %s", strerror(errno)));
        }

        return (size_t) ret;
    }


@ -1308,7 +1431,9 @@ struct llama_file {
#else
        int ret = std::fseek(fp, (long) offset, whence);
#endif
        GGML_ASSERT(ret == 0); // same
        if (ret != 0) {
            throw std::runtime_error(format("seek error: %s", strerror(errno)));
        }
    }

    void read_raw(void * ptr, size_t len) const {

@ -1351,6 +1476,7 @@ struct llama_file {
            std::fclose(fp);
        }
    }
#endif
};
using llama_files = std::vector<std::unique_ptr<llama_file>>;


@ -3722,6 +3848,44 @@ struct llama_model_loader {
        std::vector<no_init<uint8_t>> read_buf;
        std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result;

#if defined(GGML_USE_CUDA)
        // 4 staging buffers for async uploads; 1 MB each seems to be a good default for single NVMe drives.
        // NVMe RAID configurations might require more / larger buffers.
        constexpr size_t num_buffers = 4;
        constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB

        std::vector<ggml_backend_buffer_t> host_buffers;
        std::vector<void*> host_ptrs;
        std::vector<ggml_backend_event_t> events;
        size_t buffer_idx = 0; // buffer to use for async loads

        ggml_backend_t cuda_backend = nullptr;
        if (!use_mmap && !check_tensors) {
            // When not using mmapped IO, use async uploads from pinned memory to GPU memory.
            // First determine if the CUDA backend is active, and if so, determine the device ID.
            ggml_backend_buffer_t buf = bufs_mmap.count(0) ? bufs_mmap.at(0) : nullptr;
            if (buf) {
                ggml_backend_buffer_type_t buffer_type = ggml_backend_buffer_get_type(buf);
                for (int i = 0; i < ggml_backend_cuda_get_device_count(); ++i) {
                    auto * cuda_buffer_type = ggml_backend_cuda_buffer_type(i);
                    if (buffer_type == cuda_buffer_type) {
                        cuda_backend = ggml_backend_cuda_init(i);
                        break;
                    }
                }
            }

            // If the CUDA backend is active, create pinned memory buffers and events for synchronisation.
            if (cuda_backend) {
                for (size_t idx = 0; idx < num_buffers; ++idx) {
                    host_buffers.emplace_back(ggml_backend_buft_alloc_buffer(llama_default_buffer_type_cpu(true), buffer_size));
                    host_ptrs.emplace_back(ggml_backend_buffer_get_base(host_buffers[idx]));
                    events.emplace_back(ggml_backend_event_new(cuda_backend));
                }
            }
        }
#endif

        for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
            const auto * weight = get_weight(ggml_get_name(cur));
            if (weight == nullptr) {

@ -3777,6 +3941,29 @@ struct llama_model_loader {
                    }));
                }
            } else {
#if defined(GGML_USE_CUDA)
                // If cuda_backend is valid, load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
                if (cuda_backend) {
                    file->seek(weight->offs, SEEK_SET);

                    size_t bytes_read = 0;

                    while (bytes_read < n_size) {
                        size_t read_iteration = std::min<size_t>(buffer_size, n_size - bytes_read);

                        ggml_backend_event_synchronize(events[buffer_idx]);
                        file->read_raw(host_ptrs[buffer_idx], read_iteration);
                        ggml_backend_tensor_set_async(cuda_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration);
                        ggml_backend_event_record(events[buffer_idx]);

                        bytes_read += read_iteration;
                        ++buffer_idx;
                        buffer_idx %= num_buffers;
                    }
                }
                else
#endif
                {
                    read_buf.resize(n_size);
                    file->seek(weight->offs, SEEK_SET);
                    file->read_raw(read_buf.data(), n_size);

@ -3786,10 +3973,23 @@ struct llama_model_loader {
                    }
                }
            }
        }

            size_done += n_size;
        }

#if defined(GGML_USE_CUDA)
        // free temporary resources used for async cuda uploads
        if (cuda_backend) {
            for (size_t idx = 0; idx < num_buffers; ++idx) {
                ggml_backend_event_synchronize(events[idx]);
                ggml_backend_event_free(events[idx]);
                ggml_backend_buffer_free(host_buffers[idx]);
            }
            ggml_backend_free(cuda_backend);
        }
#endif

        // check validation results
        bool validation_failed = false;
        for (auto & future : validation_result) {

@ -5194,7 +5394,7 @@ static bool llm_load_tensors(
    // create tensors for the weights
    {
        const int64_t n_embd = hparams.n_embd;
        const int64_t n_embd_head = n_embd / hparams.n_head;
        const int64_t n_embd_head = (hparams.n_head == 0) ? 0 : n_embd / hparams.n_head;
        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
        const int64_t n_embd_gqa = n_embd_v_gqa;

@ -6636,16 +6836,6 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
    }
#endif

#ifdef GGML_USE_SYCL
    if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
        ggml_backend_sycl_set_single_device_mode(params.main_gpu);
        // SYCL uses the device index (0, 1, 2) directly; convert the user-provided device id to a device index.
        params.main_gpu = ggml_backend_sycl_get_device_index(params.main_gpu);
    } else {
        ggml_backend_sycl_set_mul_device_mode();
    }
#endif

    if (!llm_load_tensors(
        ml, model, params.n_gpu_layers, params.split_mode, params.main_gpu, params.tensor_split, params.use_mlock,
        params.progress_callback, params.progress_callback_user_data

@ -16105,6 +16295,11 @@ struct llama_context * llama_new_context_with_model(
        params.flash_attn = false;
    }

    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
        params.flash_attn = false;
    }

    if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
        LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
        return nullptr;

@ -16276,8 +16471,7 @@ struct llama_context * llama_new_context_with_model(
        if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
            ggml_backend_t backend = ggml_backend_sycl_init(model->main_gpu);
            if (backend == nullptr) {
                int main_gpu_id = ggml_backend_sycl_get_device_id(model->main_gpu);
                LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d (index %d) backend\n", __func__, main_gpu_id, model->main_gpu);
                LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d backend\n", __func__, model->main_gpu);
                llama_free(ctx);
                return nullptr;
            }
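Note: the read_raw/write_raw loops above cap each ReadFile/WriteFile call at 64 MB and retry until the full length has been transferred. The same chunking skeleton in portable C++ (a sketch around fread, not the Win32 path itself):

#include <algorithm>
#include <cstdio>
#include <stdexcept>

// Read exactly `len` bytes in chunks of at most 64 MB, mirroring the
// loop structure of llama_file::read_raw above.
static void read_exact(FILE * fp, void * ptr, size_t len) {
    const size_t max_chunk = 64u*1024*1024;
    size_t done = 0;
    while (done < len) {
        size_t chunk = std::min(len - done, max_chunk);
        size_t got = std::fread(static_cast<char *>(ptr) + done, 1, chunk, fp);
        if (got != chunk) {
            throw std::runtime_error("unexpectedly reached end of file");
        }
        done += got;
    }
}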
@ -1063,6 +1063,33 @@ struct test_sqr : public test_case {
    }
};

// GGML_OP_SQRT
struct test_sqrt : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqrt(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sqrt(ctx, a);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        // fill with positive values
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, 0.0f, 100.0f);
        }
    }
};

// GGML_OP_CLAMP
struct test_clamp : public test_case {
    const ggml_type type;

@ -2200,6 +2227,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
    }

    test_cases.emplace_back(new test_sqr());
    test_cases.emplace_back(new test_sqrt());
    test_cases.emplace_back(new test_clamp());

    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5));
12
vulkan-shaders/add.comp
Normal file

@ -0,0 +1,12 @@
#version 450

#include "types.comp"
#include "generic_binary_head.comp"

void main() {
    if (gl_GlobalInvocationID.x >= p.ne) {
        return;
    }

    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(gl_GlobalInvocationID.x)]) + FLOAT_TYPE(data_b[src1_idx(gl_GlobalInvocationID.x)]));
}
71
vulkan-shaders/argsort.comp
Normal file

@ -0,0 +1,71 @@
#version 450

#include "types.comp"

#define BLOCK_SIZE 1024
#define ASC 0

layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) buffer D {int data_d[];};

layout (push_constant) uniform parameter {
    uint ncols;
    uint ncols_pad;
    uint order;
} p;

shared int dst_row[BLOCK_SIZE];

void swap(uint idx0, uint idx1) {
    int tmp = dst_row[idx0];
    dst_row[idx0] = dst_row[idx1];
    dst_row[idx1] = tmp;
}

void main() {
    // bitonic sort
    const int col = int(gl_LocalInvocationID.x);
    const uint row = gl_WorkGroupID.y;

    if (col >= p.ncols_pad) {
        return;
    }

    const uint row_offset = row * p.ncols;

    // initialize indices
    dst_row[col] = col;
    barrier();

    for (uint k = 2; k <= p.ncols_pad; k *= 2) {
        for (uint j = k / 2; j > 0; j /= 2) {
            const uint ixj = col ^ j;
            if (ixj > col) {
                if ((col & k) == 0) {
                    if (dst_row[col] >= p.ncols ||
                        (dst_row[ixj] < p.ncols && (p.order == ASC ?
                            data_a[row_offset + dst_row[col]] > data_a[row_offset + dst_row[ixj]] :
                            data_a[row_offset + dst_row[col]] < data_a[row_offset + dst_row[ixj]]))
                    ) {
                        swap(col, ixj);
                    }
                } else {
                    if (dst_row[ixj] >= p.ncols ||
                        (dst_row[col] < p.ncols && (p.order == ASC ?
                            data_a[row_offset + dst_row[col]] < data_a[row_offset + dst_row[ixj]] :
                            data_a[row_offset + dst_row[col]] > data_a[row_offset + dst_row[ixj]]))
                    ) {
                        swap(col, ixj);
                    }
                }
            }
            barrier();
        }
    }

    if (col < p.ncols) {
        data_d[row_offset + col] = dst_row[col];
    }
}
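Note: argsort.comp implements a bitonic argsort: only the index array is permuted, the data is read through it, and rows are padded to a power of two (ncols_pad) so the k/j network is well-formed; padded indices compare as "greater" and sort to the end. A CPU reference of the same network for one row, ascending (C++ sketch):

#include <cstdint>
#include <utility>
#include <vector>

// Bitonic argsort over one row, same k/j loop structure as the shader.
// n_pad must be a power of two >= values.size(); out-of-range indices
// sort to the end, like the dst_row[col] >= p.ncols checks above.
static std::vector<int> bitonic_argsort(const std::vector<float> & values, uint32_t n_pad) {
    const uint32_t n = (uint32_t) values.size();
    std::vector<int> idx(n_pad);
    for (uint32_t i = 0; i < n_pad; ++i) idx[i] = (int) i;

    auto greater = [&](int a, int b) { // "a sorts after b"; padding goes last
        if ((uint32_t) a >= n) return true;
        if ((uint32_t) b >= n) return false;
        return values[a] > values[b];
    };

    for (uint32_t k = 2; k <= n_pad; k *= 2) {
        for (uint32_t j = k / 2; j > 0; j /= 2) {
            for (uint32_t col = 0; col < n_pad; ++col) {
                const uint32_t ixj = col ^ j;
                if (ixj > col) {
                    const bool asc = (col & k) == 0;
                    if (asc ? greater(idx[col], idx[ixj]) : greater(idx[ixj], idx[col])) {
                        std::swap(idx[col], idx[ixj]);
                    }
                }
            }
        }
    }
    idx.resize(n); // drop the padding slots
    return idx;
}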
13
vulkan-shaders/clamp.comp
Normal file

@ -0,0 +1,13 @@
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

void main() {
    if (gl_GlobalInvocationID.x >= p.ne) {
        return;
    }

    const FLOAT_TYPE val = FLOAT_TYPE(data_a[src0_idx(gl_GlobalInvocationID.x)]);
    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = D_TYPE(val < p.param1 ? p.param1 : (val > p.param2 ? p.param2 : val));
}
16
vulkan-shaders/copy.comp
Normal file

@ -0,0 +1,16 @@
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

void main() {
    if (gl_GlobalInvocationID.x >= p.ne) {
        return;
    }

#ifndef OPTIMIZATION_ERROR_WORKAROUND
    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = D_TYPE(data_a[src0_idx(gl_GlobalInvocationID.x)]);
#else
    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = data_a[src0_idx(gl_GlobalInvocationID.x)];
#endif
}
20
vulkan-shaders/dequant_f32.comp
Normal file

@ -0,0 +1,20 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {float data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    const uint i = gl_GlobalInvocationID.x * 16;

    if (i >= p.nel) {
        return;
    }

    [[unroll]] for (uint l = 0; l < 16; l++) {
        data_b[i + l] = D_TYPE(data_a[i + l]);
    }
}
60
vulkan-shaders/dequant_funcs.comp
Normal file

@ -0,0 +1,60 @@
#if !defined(DATA_A_F32) && !defined(DATA_A_F16)
#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
#endif

#if defined(DATA_A_F32)
vec2 dequantize(uint ib, uint iqs, uint a_offset) {
    return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
}
#endif

#if defined(DATA_A_F16)
vec2 dequantize(uint ib, uint iqs, uint a_offset) {
    return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
}
#endif

#if defined(DATA_A_Q4_0)
vec2 dequantize(uint ib, uint iqs, uint a_offset) {
    const float d = float(data_a[a_offset + ib].d);
    const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
    return (vec2(vui & 0xF, vui >> 4) - 8.0f) * d;
}
#endif

#if defined(DATA_A_Q4_1)
vec2 dequantize(uint ib, uint iqs, uint a_offset) {
    const float d = float(data_a[a_offset + ib].d);
    const float m = float(data_a[a_offset + ib].m);
    const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
    return vec2(vui & 0xF, vui >> 4) * d + m;
}
#endif

#if defined(DATA_A_Q5_0)
vec2 dequantize(uint ib, uint iqs, uint a_offset) {
    const float d = float(data_a[a_offset + ib].d);
    const uint uint_qh = uint(data_a[a_offset + ib].qh[1]) << 16 | data_a[a_offset + ib].qh[0];
    const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
    const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
    return (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f) * d;
}
#endif

#if defined(DATA_A_Q5_1)
vec2 dequantize(uint ib, uint iqs, uint a_offset) {
    const float d = float(data_a[a_offset + ib].d);
    const float m = float(data_a[a_offset + ib].m);
    const uint uint_qh = data_a[a_offset + ib].qh;
    const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
    const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
    return vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) * d + m;
}
#endif

#if defined(DATA_A_Q8_0)
vec2 dequantize(uint ib, uint iqs, uint a_offset) {
    const float d = float(data_a[a_offset + ib].d);
    return vec2(int(data_a[a_offset + ib].qs[iqs]), int(data_a[a_offset + ib].qs[iqs + 1])) * d;
}
#endif
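Note: in the Q4_0 variant above, each byte packs two 4-bit values stored with an offset of 8, and the whole block shares one scale d, so a pair dequantizes as (nibble - 8) * d. The same arithmetic as a scalar C++ sketch (block layout simplified: the real ggml block_q4_0 stores d as fp16, shown here as a plain float for brevity):

#include <cstdint>
#include <utility>

// Simplified Q4_0 block: 32 weights = 16 bytes of packed nibbles + one scale.
struct q4_0_block {
    float   d;      // per-block scale
    uint8_t qs[16]; // 32 4-bit quants, two per byte
};

// Dequantize the value pair held in byte `iqs`, mirroring the Q4_0
// dequantize() above: low nibble and high nibble, each re-centered by -8.
static std::pair<float, float> dequant_q4_0(const q4_0_block & b, int iqs) {
    const uint8_t v = b.qs[iqs];
    return { ((v & 0xF) - 8) * b.d, ((v >> 4) - 8) * b.d };
}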

13 vulkan-shaders/dequant_head.comp Normal file
@ -0,0 +1,13 @@
#extension GL_EXT_control_flow_attributes : require
#extension GL_EXT_shader_16bit_storage : require

layout (push_constant) uniform parameter
{
    uint M;
    uint K;
    uint stride_a;
    uint stride_b;
    uint nel;
} p;

#include "types.comp"
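
On the host side, each dispatch has to push a parameter block matching this declaration; since all five members are 4-byte scalars, a plain C++ struct lines up with the shader's push-constant layout. A sketch of the mirror (struct name assumed, not part of this change):

    #include <cstdint>

    // Host-side mirror of the dequant push constants above.
    struct vk_dequant_push_constants {
        uint32_t M;
        uint32_t K;
        uint32_t stride_a;
        uint32_t stride_b;
        uint32_t nel;
    };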

34 vulkan-shaders/dequant_q2_k.comp Normal file
@ -0,0 +1,34 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
        const uint i = gl_WorkGroupID.x * 256 + wgy;
        if (i >= p.M * p.K / QUANT_K) {
            return;
        }

        const uint tid = gl_LocalInvocationID.x;
        const uint ip = tid / 32;
        const uint il = tid - 32 * ip;
        const uint is = 8 * ip + il / 16;

        const uint y_idx = i * QUANT_K + 128 * ip + il;

        const uint ql_idx = 32 * ip + il;
        const uint8_t qs = data_a[i].qs[32 * ip + il];

        FLOAT_TYPE dall = FLOAT_TYPE(data_a[i].d.x);
        FLOAT_TYPE dmin = FLOAT_TYPE(data_a[i].d.y);
        data_b[y_idx +  0] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+0] & 0xF) * ((qs >> 0) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+0] >> 4));
        data_b[y_idx + 32] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+2] & 0xF) * ((qs >> 2) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+2] >> 4));
        data_b[y_idx + 64] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+4] & 0xF) * ((qs >> 4) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+4] >> 4));
        data_b[y_idx + 96] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+6] & 0xF) * ((qs >> 6) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+6] >> 4));
    }
}

42 vulkan-shaders/dequant_q3_k.comp Normal file
@ -0,0 +1,42 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
        const uint i = uint(gl_WorkGroupID.x * 256 + wgy);
        if (i >= p.M * p.K / QUANT_K) {
            return;
        }

        const uint r = gl_LocalInvocationID.x / 4;
        const uint tid = r / 2;
        const uint is0 = r % 2;
        const uint l0 = 16 * is0 + 4 * (gl_LocalInvocationID.x % 4);
        const uint n = tid / 4;
        const uint j = tid - 4*n;

        const uint8_t m = uint8_t(1 << (4*n + j));
        const uint is = 8*n + 2*j + is0;
        const uint shift = 2*j;

        const int8_t us = int8_t(is <  4 ? (data_a[i].scales[is-0] & 0xF) | (((data_a[i].scales[is+8] >> 0) & 3) << 4) :
                                 is <  8 ? (data_a[i].scales[is-0] & 0xF) | (((data_a[i].scales[is+4] >> 2) & 3) << 4) :
                                 is < 12 ? (data_a[i].scales[is-8] >>  4) | (((data_a[i].scales[is+0] >> 4) & 3) << 4) :
                                           (data_a[i].scales[is-8] >>  4) | (((data_a[i].scales[is-4] >> 6) & 3) << 4));
        const FLOAT_TYPE d_all = FLOAT_TYPE(data_a[i].d);
        const FLOAT_TYPE dl    = d_all * FLOAT_TYPE(us - 32);

        const uint y_idx = i * QUANT_K + 128 * n + 32 * j;
        const uint qs_idx = 32*n;

        for (uint l = l0; l < l0 + 4; ++l) {
            data_b[y_idx + l] = D_TYPE(dl * FLOAT_TYPE(int8_t((data_a[i].qs[qs_idx + l] >> shift) & 3) - (((data_a[i].hmask[l] & m) != 0) ? 0 : 4)));
        }
    }
}

32 vulkan-shaders/dequant_q4_0.comp Normal file
@ -0,0 +1,32 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {block_q4_0 data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;

    const uint tid = gl_LocalInvocationID.x % 64;
    const uint il  = tid/32;
    const uint ir  = tid%32;
    const uint ib = 32*i + ir;
    if (ib >= p.nel / 32) {
        return;
    }

    const uint b_idx = 1024*i + 32*ir + 8*il;

    const float d = float(data_a[ib].d);
    const float dm = -8.0f * d;

    const uint q_idx = 8*il;

    [[unroll]] for (uint l = 0; l < 8; ++l) {
        data_b[b_idx + l +  0] = D_TYPE(d * (data_a[ib].qs[q_idx + l] & 0xF) + dm);
        data_b[b_idx + l + 16] = D_TYPE(d * (data_a[ib].qs[q_idx + l] >>  4) + dm);
    }
}

32 vulkan-shaders/dequant_q4_1.comp Normal file
@ -0,0 +1,32 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {block_q4_1 data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;

    const uint tid = gl_LocalInvocationID.x % 64;
    const uint il  = tid/32;
    const uint ir  = tid%32;
    const uint ib = 32*i + ir;
    if (ib >= p.nel / 32) {
        return;
    }

    const uint b_idx = 1024*i + 32*ir + 8*il;

    const float d = float(data_a[ib].d);
    const float m = float(data_a[ib].m);

    const uint q_idx = 8*il;

    [[unroll]] for (uint l = 0; l < 8; ++l) {
        data_b[b_idx + l +  0] = D_TYPE(d * (data_a[ib].qs[q_idx + l] & 0xF) + m);
        data_b[b_idx + l + 16] = D_TYPE(d * (data_a[ib].qs[q_idx + l] >>  4) + m);
    }
}

56 vulkan-shaders/dequant_q4_k.comp Normal file
@ -0,0 +1,56 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
        const uint i = gl_WorkGroupID.x * 256 + wgy;
        if (i >= p.M * p.K / QUANT_K) {
            return;
        }

        const uint tid = gl_LocalInvocationID.x;
        const uint il = tid / 8;
        const uint ir = tid % 8;
        const uint is = 2 * il;
        const uint n = 4;

        const FLOAT_TYPE dall = FLOAT_TYPE(data_a[i].d.x);
        const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[i].d.y);

        const uint y_idx = i * QUANT_K + 64 * il + n * ir;
        const uint qs_idx = 32*il + n * ir;

        uint8_t sc;
        uint8_t m;
        if (is < 4) {
            sc = uint8_t(data_a[i].scales[is] & 63);
            m  = uint8_t(data_a[i].scales[is + 4] & 63);
        } else {
            sc = uint8_t((data_a[i].scales[is + 4] & 0xF) | ((data_a[i].scales[is - 4] >> 6) << 4));
            m  = uint8_t((data_a[i].scales[is + 4] >>  4) | ((data_a[i].scales[is    ] >> 6) << 4));
        }
        const FLOAT_TYPE d1 = dall * sc;
        const FLOAT_TYPE m1 = dmin * m;

        if (is < 4) {
            sc = uint8_t(data_a[i].scales[is + 1] & 63);
            m  = uint8_t(data_a[i].scales[is + 5] & 63);
        } else {
            sc = uint8_t((data_a[i].scales[is + 5] & 0xF) | ((data_a[i].scales[is - 3] >> 6) << 4));
            m  = uint8_t((data_a[i].scales[is + 5] >>  4) | ((data_a[i].scales[is + 1] >> 6) << 4));
        }
        const FLOAT_TYPE d2 = dall * sc;
        const FLOAT_TYPE m2 = dmin * m;

        [[unroll]] for (uint l = 0; l < n; ++l) {
            data_b[y_idx + l     ] = D_TYPE(d1 * FLOAT_TYPE(data_a[i].qs[qs_idx + l] & 0xF) - m1);
            data_b[y_idx + l + 32] = D_TYPE(d2 * FLOAT_TYPE(data_a[i].qs[qs_idx + l] >>  4) - m2);
        }
    }
}
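
The two if/else ladders above unpack the K-quant 6-bit (scale, min) pairs from the 12-byte scales array: entries 0-3 sit in the low 6 bits of bytes 0-7, while entries 4-7 are split across the nibbles of bytes 8-11 plus the top 2 bits of bytes 0-7. A scalar sketch of the same unpack, directly transcribing the shader's branches (hypothetical helper name):

    #include <cstdint>

    // Unpack (scale, min) pair j (0..7) from a 12-byte K-quant scales array.
    void get_scale_min_k4_ref(int j, const uint8_t *s, uint8_t *sc, uint8_t *mn) {
        if (j < 4) {
            *sc = s[j] & 63;
            *mn = s[j + 4] & 63;
        } else {
            *sc = (s[j + 4] & 0xF) | ((s[j - 4] >> 6) << 4);
            *mn = (s[j + 4] >>  4) | ((s[j    ] >> 6) << 4);
        }
    }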

34 vulkan-shaders/dequant_q5_0.comp Normal file
@ -0,0 +1,34 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {block_q5_0 data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;

    const uint tid = gl_LocalInvocationID.x % 64;
    const uint il  = tid/32;
    const uint ir  = tid%32;
    const uint ib = 32*i + ir;
    if (ib >= p.nel / 32) {
        return;
    }

    const uint b_idx = 1024*i + 32*ir + 8*il;

    const float d = float(data_a[ib].d);
    const uint qh = uint(data_a[ib].qh[1]) << 16 | data_a[ib].qh[0];

    const uint q_idx = 8*il;

    [[unroll]] for (uint l = 0; l < 8; ++l) {
        const uint iqs = q_idx + l;
        const uint vui = uint(data_a[ib].qs[iqs]);
        data_b[b_idx + l +  0] = D_TYPE(d * (((vui & 0xF) | (((qh >> iqs) << 4) & 0x10)) - 16.0f));
        data_b[b_idx + l + 16] = D_TYPE(d * (((vui >>  4) | ((qh >> (iqs + 12)) & 0x10)) - 16.0f));
    }
}

35 vulkan-shaders/dequant_q5_1.comp Normal file
@ -0,0 +1,35 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {block_q5_1 data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;

    const uint tid = gl_LocalInvocationID.x % 64;
    const uint il  = tid/32;
    const uint ir  = tid%32;
    const uint ib = 32*i + ir;
    if (ib >= p.nel / 32) {
        return;
    }

    const uint b_idx = 1024*i + 32*ir + 8*il;

    const float d = float(data_a[ib].d);
    const float m = float(data_a[ib].m);
    const uint qh = data_a[ib].qh;

    const uint q_idx = 8*il;

    [[unroll]] for (uint l = 0; l < 8; ++l) {
        const uint iqs = q_idx + l;
        const uint vui = uint(data_a[ib].qs[iqs]);
        data_b[b_idx + l +  0] = D_TYPE(d * (((vui & 0xF) | (((qh >> iqs) << 4) & 0x10))) + m);
        data_b[b_idx + l + 16] = D_TYPE(d * (((vui >>  4) | ((qh >> (iqs + 12)) & 0x10))) + m);
    }
}

58 vulkan-shaders/dequant_q5_k.comp Normal file
@ -0,0 +1,58 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
        const uint i = gl_WorkGroupID.x * 256 + wgy;
        if (i >= p.M * p.K / QUANT_K) {
            return;
        }

        const uint tid = gl_LocalInvocationID.x;
        const uint il = tid / 16;
        const uint ir = tid % 16;
        const uint is = 2 * il;

        const FLOAT_TYPE dall = FLOAT_TYPE(data_a[i].d.x);
        const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[i].d.y);

        const uint y_idx = i * QUANT_K + 64 * il + 2 * ir;
        const uint qs_idx = 32*il + 2 * ir;
        const uint qh_idx = 2 * ir;

        uint8_t sc;
        uint8_t m;
        if (is < 4) {
            sc = uint8_t(data_a[i].scales[is] & 63);
            m  = uint8_t(data_a[i].scales[is + 4] & 63);
        } else {
            sc = uint8_t((data_a[i].scales[is + 4] & 0xF) | ((data_a[i].scales[is - 4] >> 6) << 4));
            m  = uint8_t((data_a[i].scales[is + 4] >>  4) | ((data_a[i].scales[is    ] >> 6) << 4));
        }
        const FLOAT_TYPE d1 = dall * sc;
        const FLOAT_TYPE m1 = dmin * m;

        if (is < 4) {
            sc = uint8_t(data_a[i].scales[is + 1] & 63);
            m  = uint8_t(data_a[i].scales[is + 5] & 63);
        } else {
            sc = uint8_t((data_a[i].scales[is + 5] & 0xF) | ((data_a[i].scales[is - 3] >> 6) << 4));
            m  = uint8_t((data_a[i].scales[is + 5] >>  4) | ((data_a[i].scales[is + 1] >> 6) << 4));
        }
        const FLOAT_TYPE d2 = dall * sc;
        const FLOAT_TYPE m2 = dmin * m;

        const uint8_t hm1 = uint8_t(1 << (2 * il    ));
        const uint8_t hm2 = uint8_t(1 << (2 * il + 1));
        data_b[y_idx     ] = D_TYPE(d1 * FLOAT_TYPE((data_a[i].qs[qs_idx    ] & 0xF) + (((data_a[i].qh[qh_idx    ] & hm1) != 0) ? 16 : 0)) - m1);
        data_b[y_idx +  1] = D_TYPE(d1 * FLOAT_TYPE((data_a[i].qs[qs_idx + 1] & 0xF) + (((data_a[i].qh[qh_idx + 1] & hm1) != 0) ? 16 : 0)) - m1);
        data_b[y_idx + 32] = D_TYPE(d2 * FLOAT_TYPE((data_a[i].qs[qs_idx    ] >>  4) + (((data_a[i].qh[qh_idx    ] & hm2) != 0) ? 16 : 0)) - m2);
        data_b[y_idx + 33] = D_TYPE(d2 * FLOAT_TYPE((data_a[i].qs[qs_idx + 1] >>  4) + (((data_a[i].qh[qh_idx + 1] & hm2) != 0) ? 16 : 0)) - m2);
    }
}

33 vulkan-shaders/dequant_q6_k.comp Normal file
@ -0,0 +1,33 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
        const uint i = gl_WorkGroupID.x * 256 + wgy;
        if (i >= p.M * p.K / QUANT_K) {
            return;
        }
        const uint tid = gl_LocalInvocationID.x;
        const uint ip = tid / 32;
        const uint il = tid - 32 * ip;
        const uint is = 8 * ip + il / 16;

        const uint y_idx = i * QUANT_K + 128 * ip + il;

        const uint ql_idx = 64 * ip + il;
        const uint8_t qh = data_a[i].qh[32 * ip + il];

        const FLOAT_TYPE d = FLOAT_TYPE(data_a[i].d);

        data_b[y_idx +  0] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 0] * (int8_t((data_a[i].ql[ql_idx +  0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32)));
        data_b[y_idx + 32] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 2] * (int8_t((data_a[i].ql[ql_idx + 32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32)));
        data_b[y_idx + 64] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 4] * (int8_t((data_a[i].ql[ql_idx +  0] >>  4) | (((qh >> 4) & 3) << 4)) - 32)));
        data_b[y_idx + 96] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 6] * (int8_t((data_a[i].ql[ql_idx + 32] >>  4) | (((qh >> 6) & 3) << 4)) - 32)));
    }
}

31 vulkan-shaders/dequant_q8_0.comp Normal file
@ -0,0 +1,31 @@
#version 450

#include "dequant_head.comp"

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {block_q8_0 data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;

    const uint tid = gl_LocalInvocationID.x % 64;
    const uint il  = tid/32;
    const uint ir  = tid%32;
    const uint ib = 32*i + ir;
    if (ib >= p.nel / 32) {
        return;
    }

    const uint b_idx = 1024*i + 32*ir + 16*il;

    const float d = float(data_a[ib].d);

    const uint q_idx = 16*il;

    [[unroll]] for (uint l = 0; l < 16; l += 2) {
        data_b[b_idx + l    ] = D_TYPE(d * data_a[ib].qs[q_idx + l    ]);
        data_b[b_idx + l + 1] = D_TYPE(d * data_a[ib].qs[q_idx + l + 1]);
    }
}

34 vulkan-shaders/diag_mask_inf.comp Normal file
@ -0,0 +1,34 @@
#version 450

#extension GL_EXT_shader_16bit_storage : require
#extension GL_EXT_control_flow_attributes : enable

layout (push_constant) uniform parameter
{
    uint ncols;
    uint rows_per_channel;
    uint n_past;
} p;

#include "types.comp"

layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

void main() {
    const uint col = gl_GlobalInvocationID.y;
    const uint row = gl_GlobalInvocationID.x;

    if (col >= p.ncols) {
        return;
    }

    const uint i = row*p.ncols + col;
    if (col > p.n_past + row % p.rows_per_channel) {
        data_d[i] = D_TYPE(uintBitsToFloat(0xFF800000));
    } else {
        data_d[i] = D_TYPE(data_a[i]);
    }
}
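
The constant 0xFF800000 is the IEEE-754 single-precision bit pattern for negative infinity (sign 1, exponent all ones, mantissa 0), so uintBitsToFloat(0xFF800000) writes -inf into the masked positions; a later softmax turns those entries into zero probability. A one-line host-side check of the same reinterpretation:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
        const uint32_t bits = 0xFF800000u;
        float f;
        std::memcpy(&f, &bits, sizeof f);   // same reinterpretation as uintBitsToFloat
        assert(std::isinf(f) && f < 0.0f);  // negative infinity
    }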

12 vulkan-shaders/div.comp Normal file
@ -0,0 +1,12 @@
#version 450

#include "types.comp"
#include "generic_binary_head.comp"

void main() {
    if (gl_GlobalInvocationID.x >= p.ne) {
        return;
    }

    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(gl_GlobalInvocationID.x)]) / FLOAT_TYPE(data_b[src1_idx(gl_GlobalInvocationID.x)]));
}

25 vulkan-shaders/gelu.comp Normal file
@ -0,0 +1,25 @@
#version 450

#include "generic_head.comp"
#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable

layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

void main() {
    const float GELU_COEF_A    = 0.044715f;
    const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
    const uint i = gl_GlobalInvocationID.x;

    if (i >= p.KX) {
        return;
    }

    const float xi = float(data_a[i]);
    const float val = SQRT_2_OVER_PI*xi*(1.0f + GELU_COEF_A*xi*xi);
    data_d[i] = D_TYPE(0.5f*xi*(2.0f - 2.0f / (exp(2 * val) + 1)));
}
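
The last line is the tanh-approximation GELU rewritten to avoid a tanh call: with val = sqrt(2/pi) * x * (1 + 0.044715 * x^2), the identity tanh(v) = 1 - 2/(exp(2v) + 1) gives 0.5*x*(1 + tanh(val)) = 0.5*x*(2 - 2/(exp(2*val) + 1)), which is exactly the expression the shader evaluates.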

48 vulkan-shaders/generic_binary_head.comp Normal file
@ -0,0 +1,48 @@
#extension GL_EXT_shader_16bit_storage : require

layout (push_constant) uniform parameter
{
    uint ne;
    uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
    uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
    uint ne20; uint ne21; uint ne22; uint ne23; uint nb20; uint nb21; uint nb22; uint nb23;
    uint d_offset;
    float param1; float param2;
} p;

layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};

uint src0_idx(uint idx) {
    const uint i03 = idx / (p.ne02*p.ne01*p.ne00);
    const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
    const uint i02 = (idx - i03_offset) / (p.ne01*p.ne00);
    const uint i02_offset = i02*p.ne01*p.ne00;
    const uint i01 = (idx - i03_offset - i02_offset) / p.ne00;
    const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
    return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
}

uint src1_idx(uint idx) {
    const uint i03 = idx / (p.ne02*p.ne01*p.ne00);
    const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
    const uint i02 = (idx - i03_offset) / (p.ne01*p.ne00);
    const uint i02_offset = i02*p.ne01*p.ne00;
    const uint i01 = (idx - i03_offset - i02_offset) / p.ne00;
    const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;

    return (i03 % p.ne13)*p.nb13 + (i02 % p.ne12)*p.nb12 + (i01 % p.ne11)*p.nb11 + (i00 % p.ne10)*p.nb10;
}

uint dst_idx(uint idx) {
    const uint i23 = idx / (p.ne22*p.ne21*p.ne20);
    const uint i23_offset = i23 * p.ne22*p.ne21*p.ne20;
    const uint i22 = (idx - i23_offset) / (p.ne21*p.ne20);
    const uint i22_offset = i22*p.ne21*p.ne20;
    const uint i21 = (idx - i23_offset - i22_offset) / p.ne20;
    const uint i20 = idx - i23_offset - i22_offset - i21*p.ne20;
    return i23*p.nb23 + i22*p.nb22 + i21*p.nb21 + i20*p.nb20;
}
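
src1_idx differs from src0_idx only in the "% p.ne1x" terms, which implement ggml-style broadcasting: along any dimension where src1 has extent 1, the modulo maps every src0 coordinate back to index 0 of src1. A CPU sketch of the same rule (hypothetical free function, arrays ordered innermost-first):

    #include <cstdint>

    // Broadcast a 4-D coordinate of the larger operand onto src1:
    // dimensions where src1 has extent 1 collapse to coordinate 0.
    uint32_t src1_offset_ref(const uint32_t i[4],      // coords in src0's shape
                             const uint32_t ne1[4],    // src1 extents
                             const uint32_t nb1[4]) {  // src1 strides
        return (i[3] % ne1[3]) * nb1[3] + (i[2] % ne1[2]) * nb1[2]
             + (i[1] % ne1[1]) * nb1[1] + (i[0] % ne1[0]) * nb1[0];
    }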

9 vulkan-shaders/generic_head.comp Normal file
@ -0,0 +1,9 @@
#extension GL_EXT_shader_16bit_storage : require

layout (push_constant) uniform parameter
{
    uint KX;
    uint KY;
    float param1;
    float param2;
} p;

35 vulkan-shaders/generic_unary_head.comp Normal file
@ -0,0 +1,35 @@
#extension GL_EXT_shader_16bit_storage : require

layout (push_constant) uniform parameter
{
    uint ne;
    uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
    uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
    uint d_offset;
    float param1; float param2;
} p;

layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

uint src0_idx(uint idx) {
    const uint i03 = idx / (p.ne02*p.ne01*p.ne00);
    const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
    const uint i02 = (idx - i03_offset) / (p.ne01*p.ne00);
    const uint i02_offset = i02*p.ne01*p.ne00;
    const uint i01 = (idx - i03_offset - i02_offset) / p.ne00;
    const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
    return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
}

uint dst_idx(uint idx) {
    const uint i13 = idx / (p.ne12*p.ne11*p.ne10);
    const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
    const uint i12 = (idx - i13_offset) / (p.ne11*p.ne10);
    const uint i12_offset = i12*p.ne11*p.ne10;
    const uint i11 = (idx - i13_offset - i12_offset) / p.ne10;
    const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
    return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + i10*p.nb10;
}

26 vulkan-shaders/get_rows.comp Normal file
@ -0,0 +1,26 @@
#version 450

#include "types.comp"
#include "generic_binary_head.comp"

void main() {
    const uint i00 = gl_GlobalInvocationID.x;
    const uint i10 = gl_GlobalInvocationID.y;
    const uint i11 = (gl_GlobalInvocationID.z)/p.ne12;
    const uint i12 = (gl_GlobalInvocationID.z)%p.ne12;

    if (i00 >= p.ne00) {
        return;
    }

    const uint i01 = data_b[i10*p.nb10 + i11*p.nb11 + i12*p.nb12];

    const uint a_offset = i01*p.nb01 + i11*p.nb02 + i12*p.nb03;
    const uint d_offset = i10*p.nb21 + i11*p.nb22 + i12*p.nb23;

#ifndef OPTIMIZATION_ERROR_WORKAROUND
    data_d[d_offset + i00] = D_TYPE(data_a[a_offset + i00]);
#else
    data_d[d_offset + i00] = data_a[a_offset + i00];
#endif
}

31 vulkan-shaders/get_rows_quant.comp Normal file
@ -0,0 +1,31 @@
#version 450

#include "types.comp"
#include "generic_binary_head.comp"
#include "dequant_funcs.comp"

void main() {
    const uint i00 = (gl_GlobalInvocationID.x)*2;
    const uint i10 = gl_GlobalInvocationID.y;
    const uint i11 = (gl_GlobalInvocationID.z)/p.ne12;
    const uint i12 = (gl_GlobalInvocationID.z)%p.ne12;

    if (i00 >= p.ne00) {
        return;
    }

    const uint i01 = data_b[i10*p.nb10 + i11*p.nb11 + i12*p.nb12];

    const uint a_offset = i01*p.nb01 + i11*p.nb02 + i12*p.nb03;
    const uint d_offset = i10*p.nb21 + i11*p.nb22 + i12*p.nb23;

    const uint ib   = a_offset + i00/QUANT_K; // block index
    const uint iqs  = (i00%QUANT_K)/QUANT_R;  // quant index
    const uint iybs = i00 - i00%QUANT_K;      // dst block start index
    const uint y_offset = QUANT_R == 1 ? 1 : QUANT_K/2;

    vec2 v = dequantize(ib, iqs, 0);

    data_d[d_offset + iybs + iqs           ] = D_TYPE(v.x);
    data_d[d_offset + iybs + iqs + y_offset] = D_TYPE(v.y);
}
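
Each dequantize call returns two values whose destination indices differ by y_offset: for nibble-packed formats such as Q4_0 the two halves of one byte belong to positions half a block apart (QUANT_R == 2, y_offset == QUANT_K/2), while for byte-per-value formats such as Q8_0 they are adjacent (QUANT_R == 1, y_offset == 1). Worked example under the Q4_0 assumption (QUANT_K = 32): i00 = 34 gives ib = a_offset + 1, iqs = 1, iybs = 32, so the pair lands at destination indices 33 and 49.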

12 vulkan-shaders/mul.comp Normal file
@ -0,0 +1,12 @@
#version 450

#include "types.comp"
#include "generic_binary_head.comp"

void main() {
    if (gl_GlobalInvocationID.x >= p.ne) {
        return;
    }

    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(gl_GlobalInvocationID.x)]) * FLOAT_TYPE(data_b[src1_idx(gl_GlobalInvocationID.x)]));
}

29 vulkan-shaders/mul_mat_split_k_reduce.comp Normal file
@ -0,0 +1,29 @@
#version 450

#extension GL_EXT_control_flow_attributes : enable

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {float data_a[];};
layout (binding = 1) writeonly buffer D {float data_d[];};

layout (push_constant) uniform parameter {
    uint ne;
    uint k_num;
} p;

void main() {
    const uint idx = gl_GlobalInvocationID.x;

    if (idx >= p.ne) {
        return;
    }

    float result = 0.0f;

    [[unroll]] for (uint i = 0; i < p.k_num; i++) {
        result += data_a[i * p.ne + idx];
    }

    data_d[idx] = result;
}
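
This kernel finishes a split-k matrix multiply: the matmul writes k_num partial result tensors back to back, and each output element is the sum of its k_num slices. A host-side reference of the reduction (a sketch, buffer layout as implied by the indexing above):

    #include <cstdint>
    #include <vector>

    // Partials are stored as k_num consecutive tensors of ne elements;
    // output element idx sums across the slices.
    std::vector<float> split_k_reduce_ref(const std::vector<float> &partials,
                                          uint32_t ne, uint32_t k_num) {
        std::vector<float> out(ne, 0.0f);
        for (uint32_t i = 0; i < k_num; ++i) {
            for (uint32_t idx = 0; idx < ne; ++idx) {
                out[idx] += partials[i * ne + idx];
            }
        }
        return out;
    }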

50 vulkan-shaders/mul_mat_vec.comp Normal file
@ -0,0 +1,50 @@
#version 450

#ifdef FLOAT16
#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
#endif

#include "mul_mat_vec_base.comp"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

layout (constant_id = 0) const uint BLOCK_SIZE = 32;

shared FLOAT_TYPE tmp[BLOCK_SIZE];

void main() {
    const uint row = gl_WorkGroupID.x;
    const uint tid = gl_LocalInvocationID.x;

    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint y_offset = QUANT_R == 1 ? 1 : QUANT_K/2;

    tmp[tid] = FLOAT_TYPE(0.0f);

    [[unroll]] for (uint i = 0; i < p.ncols/BLOCK_SIZE; i += 2) {
        const uint col  = i*BLOCK_SIZE + 2*tid;
        const uint ib   = (row*p.ncols + col)/QUANT_K; // block index
        const uint iqs  = (col%QUANT_K)/QUANT_R;       // quant index
        const uint iybs = col - col%QUANT_K;           // y block start index

        vec2 v = dequantize(ib, iqs, a_offset / QUANT_K);

        // matrix multiplication
        tmp[tid] += FLOAT_TYPE(v.x) * FLOAT_TYPE(data_b[b_offset + iybs + iqs]) +
                    FLOAT_TYPE(v.y) * FLOAT_TYPE(data_b[b_offset + iybs + iqs + y_offset]);
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }
    if (tid == 0) {
        data_d[d_offset + row] = D_TYPE(tmp[0]);
    }
}
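
BLOCK_SIZE is a Vulkan specialization constant (constant_id 0), so the same SPIR-V can be specialized to different workgroup widths at pipeline-creation time; the tree reduction at the end assumes it is a power of two. Each workgroup produces one row of a matrix-vector product. A scalar reference for checking one output element (a sketch, with a plain float matrix standing in for the dequantized A):

    #include <cstddef>

    // dst[row] = dot(A[row, :], b), with A already dequantized to float.
    float mul_mat_vec_row_ref(const float *A_row, const float *b, size_t ncols) {
        float acc = 0.0f;
        for (size_t col = 0; col < ncols; ++col) {
            acc += A_row[col] * b[col];
        }
        return acc;
    }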

81 vulkan-shaders/mul_mat_vec_base.comp Normal file
@ -0,0 +1,81 @@
#extension GL_EXT_control_flow_attributes : enable
#extension GL_EXT_shader_16bit_storage : require
#extension GL_EXT_shader_8bit_storage : require

#define K_QUANTS_PER_ITERATION 2

#ifdef MUL_MAT_ID
#define EXPERT_COUNT 8
#endif

#include "types.comp"

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
#ifdef MUL_MAT_ID
layout (binding = 3) readonly buffer IDS {int data_ids[];};
#endif

#include "dequant_funcs.comp"

layout (push_constant) uniform parameter
{
    uint ncols;
    uint stride_a;
    uint stride_b;
    uint stride_d;

    uint batch_stride_a;
    uint batch_stride_b;
    uint batch_stride_d;

#ifdef MUL_MAT_ID
    uint nei0;
    uint ne11;
#else
    uint ne02;
    uint ne12;
    uint broadcast2;
    uint broadcast3;
#endif
} p;

void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) {
#ifdef MUL_MAT_ID
    const uint expert_idx = gl_GlobalInvocationID.y;
#else
    const uint batch_idx = gl_GlobalInvocationID.y;
#endif

#ifndef MUL_MAT_ID
    const uint i13 = batch_idx / p.ne12;
    const uint i12 = batch_idx % p.ne12;

    const uint i03 = i13 / p.broadcast3;
    const uint i02 = i12 / p.broadcast2;

    const uint batch_idx_a = i03 * p.ne02 + i02;
#else
    const uint expert_id = data_ids[expert_idx];
#endif

    a_offset =
#ifdef MUL_MAT_ID
            expert_id * p.batch_stride_a;
#else
            batch_idx_a * p.batch_stride_a;
#endif
    b_offset =
#ifdef MUL_MAT_ID
            (expert_idx % p.ne11) * p.stride_b;
#else
            batch_idx * p.batch_stride_b;
#endif
    d_offset =
#ifdef MUL_MAT_ID
            expert_idx * p.stride_d;
#else
            batch_idx * p.batch_stride_d;
#endif
}

71 vulkan-shaders/mul_mat_vec_nc.comp Normal file
@ -0,0 +1,71 @@
#version 450

#extension GL_EXT_control_flow_attributes : enable
#extension GL_EXT_shader_16bit_storage : require

#define BLOCK_SIZE 32
#define FLOAT_TYPE float

layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
layout (binding = 2) writeonly buffer D {D_TYPE dst[];};

layout (push_constant) uniform parameter
{
    uint ncols_x;
    uint nrows_x;
    uint row_stride_x;
    uint channel_stride_x;
    uint channel_x_divisor;
    uint b_offset;
    uint d_offset;
} p;

shared FLOAT_TYPE tmp[BLOCK_SIZE];

void main() {
    const uint tid = gl_LocalInvocationID.x;
    const uint row_x = gl_GlobalInvocationID.y;
    const uint channel = gl_GlobalInvocationID.z;
    const uint channel_x = channel / p.channel_x_divisor;

    const uint nrows_y = p.ncols_x;
    const uint nrows_dst = p.nrows_x;
    const uint row_dst = row_x;

    const uint idst = channel*nrows_dst + row_dst;

    tmp[tid] = 0.0f;

    for (uint col_x0 = 0; col_x0 < p.ncols_x; col_x0 += BLOCK_SIZE) {
        const uint col_x = col_x0 + tid;

        if (col_x >= p.ncols_x) {
            break;
        }

        const uint row_y = col_x;

        const uint ix = channel_x*p.channel_stride_x + row_x*p.row_stride_x + col_x;
        const uint iy = channel*nrows_y + row_y;

        const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]);

        tmp[tid] += xi * FLOAT_TYPE(data_b[iy]);
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }

    if (tid == 0) {
        dst[idst] = tmp[0];
    }
}

73 vulkan-shaders/mul_mat_vec_p021.comp Normal file
@ -0,0 +1,73 @@
#version 450

#extension GL_EXT_control_flow_attributes : enable
#extension GL_EXT_shader_16bit_storage : require

#define BLOCK_SIZE 32
#define FLOAT_TYPE float

layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
layout (binding = 2) writeonly buffer D {D_TYPE dst[];};

layout (push_constant) uniform parameter
{
    uint ncols_x;
    uint nrows_x;
    uint nchannels_x;
    uint nchannels_y;
    uint b_offset;
    uint d_offset;
} p;

shared FLOAT_TYPE tmp[BLOCK_SIZE];

void main() {
    const uint tid = gl_LocalInvocationID.x;
    const uint row_x = gl_GlobalInvocationID.y;
    const uint channel = gl_GlobalInvocationID.z;
    const uint channel_x = channel / (p.nchannels_y / p.nchannels_x);

    const uint nrows_y = p.ncols_x;
    const uint nrows_dst = p.nrows_x;
    const uint row_dst = row_x;

    tmp[tid] = FLOAT_TYPE(0.0f);

    for (uint col_x0 = 0; col_x0 < p.ncols_x; col_x0 += BLOCK_SIZE) {
        const uint col_x = col_x0 + tid;

        if (col_x >= p.ncols_x) {
            break;
        }

        // x is transposed and permuted
        const uint ix = row_x*p.nchannels_x*p.ncols_x + channel_x*p.ncols_x + col_x;
        const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]);

        const uint row_y = col_x;

        // y is not transposed but permuted
        const uint iy = channel*nrows_y + row_y;

        tmp[tid] += xi * FLOAT_TYPE(data_b[iy]);
    }

    // dst is not transposed and not permuted
    const uint idst = channel*nrows_dst + row_dst;

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }

    if (tid == 0) {
        dst[idst] = tmp[0];
    }
}

73 vulkan-shaders/mul_mat_vec_q2_k.comp Normal file
@ -0,0 +1,73 @@
#version 450

#include "mul_mat_vec_base.comp"

layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;

shared FLOAT_TYPE tmp[32];

void main() {
    const uint row = gl_WorkGroupID.x;

    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;
    const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row;

    const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
    const uint ix  = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1

    const uint step = 16/K_QUANTS_PER_ITERATION; // 16 or 8

    const uint v_im = tid/step;        // 0 or 1. 0 computes 0..., 1 computes 128...
    const uint v_in = tid - step*v_im; // 0...15 or 0...7

    const uint l0 = K_QUANTS_PER_ITERATION*v_in; // 0...15
    const uint q_offset = 32*v_im + l0;
    const uint s_offset = 8*v_im;
    const uint y_offset = 128*v_im + l0;

    tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
        const uint y_idx = i * QUANT_K + y_offset;

        const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib0 + i].d.x);
        const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib0 + i].d.y);

        FLOAT_TYPE sum1 = FLOAT_TYPE(0.0);
        FLOAT_TYPE sum2 = FLOAT_TYPE(0.0);
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            sum1 += FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 0) & 3)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 0) & 3)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 2) & 3)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 2) & 3)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 4) & 3)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 4) & 3)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 6) & 3)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 6) & 3);
            sum2 += FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 0] >> 4) & 0xF)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 1] >> 4) & 0xF)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 2] >> 4) & 0xF)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 3] >> 4) & 0xF)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 4] >> 4) & 0xF)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 5] >> 4) & 0xF)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 6] >> 4) & 0xF)
                  + FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 7] >> 4) & 0xF);
        }
        tmp[16 * ix + tid] += dall * sum1 - dmin * sum2;
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (uint s = 16; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }
    if (tid == 0) {
        data_d[d_offset + row] = D_TYPE(tmp[0]);
    }
}

66 vulkan-shaders/mul_mat_vec_q3_k.comp Normal file
@ -0,0 +1,66 @@
#version 450

#include "mul_mat_vec_base.comp"

layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;

shared FLOAT_TYPE tmp[32];

void main() {
    const uint row = gl_WorkGroupID.x;

    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;
    const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row;

    const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
    const uint ix  = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1

    const uint step = 16/K_QUANTS_PER_ITERATION; // 16 or 8

    const uint v_im = tid/step;        // 0 or 1. 0 computes 0..., 1 computes 128...
    const uint v_in = tid - step*v_im; // 0...15 or 0...7

    const uint8_t m = uint8_t(1 << (4 * v_im));

    const uint l0 = K_QUANTS_PER_ITERATION*v_in; // 0...15
    const uint q_offset = 32*v_im + l0;
    const uint y_offset = 128*v_im + l0;

    tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp

    const uint s_shift = 4 * v_im;

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
        const uint y_idx = i * QUANT_K + y_offset;

        const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d);

        FLOAT_TYPE sum = FLOAT_TYPE(0.0);
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            sum += FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[0] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ]     ) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 0)) != 0) ? 0 : 4))
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[2] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 1)) != 0) ? 0 : 4))
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[4] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 2)) != 0) ? 0 : 4))
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[6] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 3)) != 0) ? 0 : 4))
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[1] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16]     ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4))
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[3] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4))
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[5] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4))
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[7] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4));
        }
        tmp[16 * ix + tid] += d * sum;
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (uint s = 16; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }
    if (tid == 0) {
        data_d[d_offset + row] = D_TYPE(tmp[0]);
    }
}

115 vulkan-shaders/mul_mat_vec_q4_k.comp Normal file
@ -0,0 +1,115 @@
#version 450

#include "mul_mat_vec_base.comp"

layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;

shared FLOAT_TYPE tmp[32];

void main() {
    const uint row = gl_WorkGroupID.x;

    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;
    const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row;

    const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
    const uint ix  = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1

    const uint step = 8/K_QUANTS_PER_ITERATION; // 8 or 4

    const uint il = tid/step;      // 0...3
    const uint ir = tid - step*il; // 0...7 or 0...3
    const uint n  = 2 * K_QUANTS_PER_ITERATION; // 2 or 4

    const uint v_im = il / 2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const uint v_in = il % 2;

    const uint l0 = n * (2 * ir + v_in); // 0...15
    const uint q_offset = 32*v_im + l0;
    const uint y_offset = 64*v_im + l0;

    tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
        const uint y1_idx = i * QUANT_K + y_offset;
        const uint y2_idx = y1_idx + 128;

        const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib0 + i].d.x);
        const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib0 + i].d.y);

        const uint8_t sc0 = uint8_t(  data_a[ib0 + i].scales[v_im * 2    ]       & 0x3f);
        const uint8_t sc1 = uint8_t(  data_a[ib0 + i].scales[v_im * 2 + 1]       & 0x3f);
        const uint8_t sc2 = uint8_t(  data_a[ib0 + i].scales[v_im * 2 + 4]       & 0x3f);
        const uint8_t sc3 = uint8_t(  data_a[ib0 + i].scales[v_im * 2 + 5]       & 0x3f);
        const uint8_t sc4 = uint8_t(( data_a[ib0 + i].scales[v_im * 2 + 8]       & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2    ] & 0xc0) >> 2));
        const uint8_t sc5 = uint8_t(( data_a[ib0 + i].scales[v_im * 2 + 9]       & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 1] & 0xc0) >> 2));
        const uint8_t sc6 = uint8_t(((data_a[ib0 + i].scales[v_im * 2 + 8] >> 4) & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 4] & 0xc0) >> 2));
        const uint8_t sc7 = uint8_t(((data_a[ib0 + i].scales[v_im * 2 + 9] >> 4) & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 5] & 0xc0) >> 2));

#if K_QUANTS_PER_ITERATION == 2
        const uint8_t q4_0  = uint8_t(data_a[ib0 + i].qs[q_offset     ] & 0xf);
        const uint8_t q4_1  = uint8_t(data_a[ib0 + i].qs[q_offset +  1] & 0xf);
        const uint8_t q4_2  = uint8_t(data_a[ib0 + i].qs[q_offset +  2] & 0xf);
        const uint8_t q4_3  = uint8_t(data_a[ib0 + i].qs[q_offset +  3] & 0xf);
        const uint8_t q4_4  = uint8_t(data_a[ib0 + i].qs[q_offset     ] >>  4);
        const uint8_t q4_5  = uint8_t(data_a[ib0 + i].qs[q_offset +  1] >>  4);
        const uint8_t q4_6  = uint8_t(data_a[ib0 + i].qs[q_offset +  2] >>  4);
        const uint8_t q4_7  = uint8_t(data_a[ib0 + i].qs[q_offset +  3] >>  4);
        const uint8_t q4_8  = uint8_t(data_a[ib0 + i].qs[q_offset + 64] & 0xf);
        const uint8_t q4_9  = uint8_t(data_a[ib0 + i].qs[q_offset + 65] & 0xf);
        const uint8_t q4_10 = uint8_t(data_a[ib0 + i].qs[q_offset + 66] & 0xf);
        const uint8_t q4_11 = uint8_t(data_a[ib0 + i].qs[q_offset + 67] & 0xf);
        const uint8_t q4_12 = uint8_t(data_a[ib0 + i].qs[q_offset + 64] >>  4);
        const uint8_t q4_13 = uint8_t(data_a[ib0 + i].qs[q_offset + 65] >>  4);
        const uint8_t q4_14 = uint8_t(data_a[ib0 + i].qs[q_offset + 66] >>  4);
        const uint8_t q4_15 = uint8_t(data_a[ib0 + i].qs[q_offset + 67] >>  4);

        const FLOAT_TYPE sx = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx]) * q4_0 + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * q4_1 + FLOAT_TYPE(data_b[b_offset + y1_idx + 2]) * q4_2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 3]) * q4_3);
        const FLOAT_TYPE sy = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * q4_4 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * q4_5 + FLOAT_TYPE(data_b[b_offset + y1_idx + 34]) * q4_6 + FLOAT_TYPE(data_b[b_offset + y1_idx + 35]) * q4_7);
        const FLOAT_TYPE sz = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx]) * q4_8 + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * q4_9 + FLOAT_TYPE(data_b[b_offset + y2_idx + 2]) * q4_10 + FLOAT_TYPE(data_b[b_offset + y2_idx + 3]) * q4_11);
        const FLOAT_TYPE sw = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * q4_12 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * q4_13 + FLOAT_TYPE(data_b[b_offset + y2_idx + 34]) * q4_14 + FLOAT_TYPE(data_b[b_offset + y2_idx + 35]) * q4_15);
        const FLOAT_TYPE smin = FLOAT_TYPE(
            FLOAT_TYPE(data_b[b_offset + y1_idx    ]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx    ]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * sc7
          + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * sc7
          + FLOAT_TYPE(data_b[b_offset + y1_idx + 2]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 34]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 2]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 34]) * sc7
          + FLOAT_TYPE(data_b[b_offset + y1_idx + 3]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 35]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 3]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 35]) * sc7
        );
        tmp[16 * ix + tid] += FLOAT_TYPE(dall * (sx * sc0 + sy * sc1 + sz * sc4 + sw * sc5) - dmin * smin);
#else
        const uint8_t q4_0 = uint8_t(data_a[ib0 + i].qs[q_offset     ] & 0xf);
        const uint8_t q4_1 = uint8_t(data_a[ib0 + i].qs[q_offset +  1] & 0xf);
        const uint8_t q4_2 = uint8_t(data_a[ib0 + i].qs[q_offset     ] >>  4);
        const uint8_t q4_3 = uint8_t(data_a[ib0 + i].qs[q_offset +  1] >>  4);
        const uint8_t q4_4 = uint8_t(data_a[ib0 + i].qs[q_offset + 64] & 0xf);
        const uint8_t q4_5 = uint8_t(data_a[ib0 + i].qs[q_offset + 65] & 0xf);
        const uint8_t q4_6 = uint8_t(data_a[ib0 + i].qs[q_offset + 64] >>  4);
        const uint8_t q4_7 = uint8_t(data_a[ib0 + i].qs[q_offset + 65] >>  4);

        const FLOAT_TYPE sx = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx     ]) * q4_0 + FLOAT_TYPE(data_b[b_offset + y1_idx +  1]) * q4_1);
        const FLOAT_TYPE sy = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * q4_2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * q4_3);
        const FLOAT_TYPE sz = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx     ]) * q4_4 + FLOAT_TYPE(data_b[b_offset + y2_idx +  1]) * q4_5);
        const FLOAT_TYPE sw = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * q4_6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * q4_7);
        const FLOAT_TYPE smin = FLOAT_TYPE(
            FLOAT_TYPE(data_b[b_offset + y1_idx]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * sc7
          + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * sc7
        );

        tmp[16 * ix + tid] += FLOAT_TYPE(dall * (sx * FLOAT_TYPE(data_a[ib0 + i].scales[v_im] & 0x3f) + sy * FLOAT_TYPE(data_a[ib0 + i].scales[v_im + 1] & 0x3f) + sz * FLOAT_TYPE((data_a[ib0 + i].scales[v_im + 4] & 0x0f) | ((data_a[ib0 + i].scales[v_im] & 0xc0) >> 2)) + sw * FLOAT_TYPE((data_a[ib0 + i].scales[v_im + 5] & 0x0f) | ((data_a[ib0 + i].scales[v_im + 1] & 0xc0) >> 2))) - dmin * smin);
#endif
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (uint s = 16; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }
    if (tid == 0) {
        data_d[d_offset + row] = D_TYPE(tmp[0]);
    }
}
111
vulkan-shaders/mul_mat_vec_q5_k.comp
Normal file
111
vulkan-shaders/mul_mat_vec_q5_k.comp
Normal file
|
@ -0,0 +1,111 @@
|
|||
#version 450
|
||||
|
||||
#include "mul_mat_vec_base.comp"
|
||||
|
||||
layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;
|
||||
|
||||
shared FLOAT_TYPE tmp[32];
|
||||
|
||||
void main() {
|
||||
const uint row = gl_WorkGroupID.x;
|
||||
|
||||
uint a_offset, b_offset, d_offset;
|
||||
get_offsets(a_offset, b_offset, d_offset);
|
||||
|
||||
const uint num_blocks_per_row = p.ncols / QUANT_K;
|
||||
const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row;
|
||||
|
||||
const uint tid = gl_LocalInvocationID.x/2; // 0...31 or 0...16
|
||||
const uint ix = gl_LocalInvocationID.x%2; // 0 or 0, 1
|
||||
|
||||
const uint il = tid/4; // 0...3
|
||||
const uint ir = tid - 4*il; // 0...7 or 0...3
|
||||
|
||||
const uint v_im = il / 2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
|
||||
const uint v_in = il % 2;
|
||||
|
||||
const uint l0 = 4*ir + 2*v_in; // 0...15
|
||||
const uint q_offset = 32*v_im + l0;
|
||||
const uint y_offset = 64*v_im + l0;
|
||||
|
||||
const uint8_t hm1 = uint8_t(1 << (2*v_im));
|
||||
const uint8_t hm2 = uint8_t(hm1 << 4);
|
||||
|
||||
tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp
|
||||
|
||||
[[unroll]] for (uint i = ix; i < num_blocks_per_row; i += 2) {
|
||||
const uint y1_idx = i * QUANT_K + y_offset;
|
||||
const uint y2_idx = y1_idx + 128;
|
||||
|
        const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib0 + i].d.x);
        const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib0 + i].d.y);

        const uint8_t sc0 = uint8_t(  data_a[ib0 + i].scales[v_im * 2    ]       & 0x3f);
        const uint8_t sc1 = uint8_t(  data_a[ib0 + i].scales[v_im * 2 + 1]       & 0x3f);
        const uint8_t sc2 = uint8_t(  data_a[ib0 + i].scales[v_im * 2 + 4]       & 0x3f);
        const uint8_t sc3 = uint8_t(  data_a[ib0 + i].scales[v_im * 2 + 5]       & 0x3f);
        const uint8_t sc4 = uint8_t(( data_a[ib0 + i].scales[v_im * 2 + 8]       & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2    ] & 0xc0) >> 2));
        const uint8_t sc5 = uint8_t(( data_a[ib0 + i].scales[v_im * 2 + 9]       & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 1] & 0xc0) >> 2));
        const uint8_t sc6 = uint8_t(((data_a[ib0 + i].scales[v_im * 2 + 8] >> 4) & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 4] & 0xc0) >> 2));
        const uint8_t sc7 = uint8_t(((data_a[ib0 + i].scales[v_im * 2 + 9] >> 4) & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 5] & 0xc0) >> 2));

        const uint8_t q4_0  = uint8_t(data_a[ib0 + i].qs[q_offset     ] & 0xf);
        const uint8_t q4_1  = uint8_t(data_a[ib0 + i].qs[q_offset +  1] & 0xf);
        const uint8_t q4_2  = uint8_t(data_a[ib0 + i].qs[q_offset + 16] & 0xf);
        const uint8_t q4_3  = uint8_t(data_a[ib0 + i].qs[q_offset + 17] & 0xf);
        const uint8_t q4_4  = uint8_t(data_a[ib0 + i].qs[q_offset     ] >> 4);
        const uint8_t q4_5  = uint8_t(data_a[ib0 + i].qs[q_offset +  1] >> 4);
        const uint8_t q4_6  = uint8_t(data_a[ib0 + i].qs[q_offset + 16] >> 4);
        const uint8_t q4_7  = uint8_t(data_a[ib0 + i].qs[q_offset + 17] >> 4);
        const uint8_t q4_8  = uint8_t(data_a[ib0 + i].qs[q_offset + 64] & 0xf);
        const uint8_t q4_9  = uint8_t(data_a[ib0 + i].qs[q_offset + 65] & 0xf);
        const uint8_t q4_10 = uint8_t(data_a[ib0 + i].qs[q_offset + 80] & 0xf);
        const uint8_t q4_11 = uint8_t(data_a[ib0 + i].qs[q_offset + 81] & 0xf);
        const uint8_t q4_12 = uint8_t(data_a[ib0 + i].qs[q_offset + 64] >> 4);
        const uint8_t q4_13 = uint8_t(data_a[ib0 + i].qs[q_offset + 65] >> 4);
        const uint8_t q4_14 = uint8_t(data_a[ib0 + i].qs[q_offset + 80] >> 4);
        const uint8_t q4_15 = uint8_t(data_a[ib0 + i].qs[q_offset + 81] >> 4);

        const FLOAT_TYPE sx = FLOAT_TYPE(
              FLOAT_TYPE(data_b[b_offset + y1_idx     ]) * (q4_0 + (((data_a[ib0 + i].qh[l0     ] & hm1) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y1_idx +  1]) * (q4_1 + (((data_a[ib0 + i].qh[l0 +  1] & hm1) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y1_idx + 16]) * (q4_2 + (((data_a[ib0 + i].qh[l0 + 16] & hm1) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y1_idx + 17]) * (q4_3 + (((data_a[ib0 + i].qh[l0 + 17] & hm1) != 0) ? 16 : 0))
        );
        const FLOAT_TYPE sy = FLOAT_TYPE(
              FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * (q4_4 + (((data_a[ib0 + i].qh[l0     ] & (hm1 << 1)) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * (q4_5 + (((data_a[ib0 + i].qh[l0 +  1] & (hm1 << 1)) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y1_idx + 48]) * (q4_6 + (((data_a[ib0 + i].qh[l0 + 16] & (hm1 << 1)) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y1_idx + 49]) * (q4_7 + (((data_a[ib0 + i].qh[l0 + 17] & (hm1 << 1)) != 0) ? 16 : 0))
        );
        const FLOAT_TYPE sz = FLOAT_TYPE(
              FLOAT_TYPE(data_b[b_offset + y2_idx     ]) * (q4_8  + (((data_a[ib0 + i].qh[l0     ] & hm2) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y2_idx +  1]) * (q4_9  + (((data_a[ib0 + i].qh[l0 +  1] & hm2) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y2_idx + 16]) * (q4_10 + (((data_a[ib0 + i].qh[l0 + 16] & hm2) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y2_idx + 17]) * (q4_11 + (((data_a[ib0 + i].qh[l0 + 17] & hm2) != 0) ? 16 : 0))
        );
        const FLOAT_TYPE sw = FLOAT_TYPE(
              FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * (q4_12 + (((data_a[ib0 + i].qh[l0     ] & (hm2 << 1)) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * (q4_13 + (((data_a[ib0 + i].qh[l0 +  1] & (hm2 << 1)) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y2_idx + 48]) * (q4_14 + (((data_a[ib0 + i].qh[l0 + 16] & (hm2 << 1)) != 0) ? 16 : 0))
            + FLOAT_TYPE(data_b[b_offset + y2_idx + 49]) * (q4_15 + (((data_a[ib0 + i].qh[l0 + 17] & (hm2 << 1)) != 0) ? 16 : 0))
        );
        const FLOAT_TYPE smin = FLOAT_TYPE(
              (FLOAT_TYPE(data_b[b_offset + y1_idx]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 17])) * sc2 + (FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 49])) * sc3
            + (FLOAT_TYPE(data_b[b_offset + y2_idx]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 17])) * sc6 + (FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 49])) * sc7
        );
        tmp[16 * ix + tid] += FLOAT_TYPE(dall * (sx * sc0 + sy * sc1 + sz * sc4 + sw * sc5) - dmin * smin);
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (uint s = 16; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }
    if (tid == 0) {
        data_d[d_offset + row] = D_TYPE(tmp[0]);
    }
}

79  vulkan-shaders/mul_mat_vec_q6_k.comp  Normal file
@@ -0,0 +1,79 @@
#version 450

#include "mul_mat_vec_base.comp"

layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;

shared FLOAT_TYPE tmp[32];

void main() {
    const uint row = gl_WorkGroupID.x;

    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;
    const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row;

    const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const uint ix  = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION;  // 0 or 0, 1

    const uint step = 16/K_QUANTS_PER_ITERATION;  // 16 or 8

    const uint v_im = tid/step;       // 0 or 1. 0 computes 0..., 1 computes 128...
    const uint v_in = tid - step*v_im; // 0...15 or 0...7

#if K_QUANTS_PER_ITERATION == 1
    const uint l0 = v_in;             // 0...15
    const uint is = 0;
#else
    const uint l0 = 4 * v_in;         // 0, 4, 8, ..., 28
    const uint is = v_in / 4;
#endif

    const uint ql_offset =  64*v_im + l0;
    const uint qh_offset =  32*v_im + l0;
    const uint s_offset  =   8*v_im + is;
    const uint y_offset  = 128*v_im + l0;

    tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
        const uint y_idx = i * QUANT_K + y_offset;

        const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d);

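        // Each 6-bit weight is rebuilt from 4 low bits in ql plus 2 high bits in
        // qh, then recentered by subtracting 32 before the per-sub-block scale.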
#if K_QUANTS_PER_ITERATION == 1
        FLOAT_TYPE sum = FLOAT_TYPE(data_b[b_offset + y_idx +  0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset +  0] & 0xF) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0x03) << 4)) - 32)
                       + FLOAT_TYPE(data_b[b_offset + y_idx + 16]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x03) << 4)) - 32)
                       + FLOAT_TYPE(data_b[b_offset + y_idx + 32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] & 0xF) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0x0c) << 2)) - 32)
                       + FLOAT_TYPE(data_b[b_offset + y_idx + 48]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x0c) << 2)) - 32)
                       + FLOAT_TYPE(data_b[b_offset + y_idx + 64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset +  0] >>  4) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0x30) >> 0)) - 32)
                       + FLOAT_TYPE(data_b[b_offset + y_idx + 80]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] >>  4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x30) >> 0)) - 32)
                       + FLOAT_TYPE(data_b[b_offset + y_idx + 96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] >>  4) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0xc0) >> 2)) - 32)
                       + FLOAT_TYPE(data_b[b_offset + y_idx +112]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] >>  4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0xc0) >> 2)) - 32);
        tmp[16 * ix + tid] += sum;
#else
        FLOAT_TYPE sum = FLOAT_TYPE(0.0);
        [[unroll]] for (int l = 0; l < 4; ++l) {
            sum += FLOAT_TYPE(data_b[b_offset + y_idx + l+ 0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 0) & 3) << 4)) - 32)
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l+32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 2) & 3) << 4)) - 32)
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l+64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] >>  4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 4) & 3) << 4)) - 32)
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l+96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] >>  4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 6) & 3) << 4)) - 32);
        }
        tmp[16 * ix + tid] += sum;
#endif
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (uint s = 16; s > 0; s >>= 1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier();
    }
    if (tid == 0) {
        data_d[d_offset + row] = D_TYPE(tmp[0]);
    }
}

494  vulkan-shaders/mul_mm.comp  Normal file
@@ -0,0 +1,494 @@
#version 450

#extension GL_EXT_control_flow_attributes : enable
#extension GL_EXT_shader_16bit_storage : require

#ifdef FLOAT16
#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
#endif

#ifdef MUL_MAT_ID
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
#endif

#include "types.comp"

#ifndef LOAD_VEC_A
#define LOAD_VEC_A 1
#endif
#ifndef LOAD_VEC_B
#define LOAD_VEC_B 1
#endif

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};

#ifdef MUL_MAT_ID
layout (binding = 3) readonly buffer IDS {int data_ids[];};
#endif

layout (push_constant) uniform parameter
{
    uint M;
    uint N;
    uint K;
    uint stride_a;
    uint stride_b;
    uint stride_d;

    uint batch_stride_a;
    uint batch_stride_b;
    uint batch_stride_d;

#ifdef MUL_MAT_ID
    uint nei0;
    uint nei1;
    uint nbi1;
    uint ne11;
#else
    uint k_split;
    uint ne02;
    uint ne12;
    uint broadcast2;
    uint broadcast3;
#endif
} p;

layout (constant_id = 1) const uint BM = 64;
layout (constant_id = 2) const uint BN = 64;
layout (constant_id = 3) const uint BK = 16;  // Assumed to be 32 if working with a quant
layout (constant_id = 4) const uint WM = 32;
layout (constant_id = 5) const uint WN = 32;
layout (constant_id = 6) const uint WMITER = 2;
layout (constant_id = 7) const uint TM = 4;
layout (constant_id = 8) const uint TN = 2;
layout (constant_id = 9) const uint WARP = 32;

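// Cooperative blocktiling: each workgroup computes a BM x BN output tile, each
// warp a WM x WN sub-tile (iterated WMITER x WNITER times), and each invocation
// a TM x TN register tile. buf_a/buf_b stage BK-deep slices of A and B in shared
// memory; the (BK+1) row stride pads the buffers, presumably to avoid shared
// memory bank conflicts.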
shared FLOAT_TYPE buf_a[BM * (BK+1)];
shared FLOAT_TYPE buf_b[BN * (BK+1)];

#ifdef MUL_MAT_ID
shared u16vec2 row_ids[2048];
#endif

void main() {
#ifdef MUL_MAT_ID
    const uint expert_idx = gl_GlobalInvocationID.z;
#else
    const uint batch_idx = gl_GlobalInvocationID.z;

    const uint i13 = batch_idx / p.ne12;
    const uint i12 = batch_idx % p.ne12;

    const uint i03 = i13 / p.broadcast3;
    const uint i02 = i12 / p.broadcast2;

    const uint batch_idx_a = i03 * p.ne02 + i02;
#endif

    const uint blocks_m = (p.M + BM - 1) / BM;
    const uint ir = gl_WorkGroupID.x % blocks_m;
    const uint ik = gl_WorkGroupID.x / blocks_m;
    const uint ic = gl_WorkGroupID.y;

    const uint warp_i = gl_LocalInvocationID.x / WARP;
    const uint warp_r = warp_i % (BM / WM);
    const uint warp_c = warp_i / (BM / WM);

    const uint WNITER = (WM * WN) / (WARP * TM * TN * WMITER);
    const uint WSUBM = WM / WMITER;
    const uint WSUBN = WN / WNITER;

    const uint tiw  = gl_LocalInvocationID.x % WARP;
    const uint tiwr = tiw % (WSUBM / TM);
    const uint tiwc = tiw / (WSUBM / TM);

    const uint loadr_a = gl_LocalInvocationID.x % (BK / LOAD_VEC_A);
    const uint loadc_a = gl_LocalInvocationID.x / (BK / LOAD_VEC_A);
    const uint loadr_b = gl_LocalInvocationID.x % (BK / LOAD_VEC_B);
    const uint loadc_b = gl_LocalInvocationID.x / (BK / LOAD_VEC_B);

    const uint loadstride_a = gl_WorkGroupSize.x * LOAD_VEC_A / BK;
    const uint loadstride_b = gl_WorkGroupSize.x * LOAD_VEC_B / BK;

#ifdef MUL_MAT_ID
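    // Gather the rows routed to this expert: scan the ids tensor and record the
    // (i0, i1) coordinates of every entry assigned to expert_idx in row_ids.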
    uint _ne1 = 0;
    for (uint ii1 = 0; ii1 < p.nei1; ii1++) {
        for (uint ii0 = 0; ii0 < p.nei0; ii0++) {
            if (data_ids[ii1*p.nbi1 + ii0] == expert_idx) {
                row_ids[_ne1] = u16vec2(ii0, ii1);
                _ne1++;
            }
        }
    }

    barrier();

    // Workgroup has no work
    if (ic * BN >= _ne1) return;
#endif

#ifdef MUL_MAT_ID
    const uint start_k = 0;
    const uint end_k = p.K;
#else
    const uint start_k = ik * p.k_split;
    const uint end_k = min(p.K, (ik + 1) * p.k_split);
#endif

    uint pos_a = (
#ifdef MUL_MAT_ID
        expert_idx * p.batch_stride_a +
#else
        batch_idx_a * p.batch_stride_a +
#endif
        ir * BM * p.stride_a + start_k) / LOAD_VEC_A;
#ifdef MUL_MAT_ID
    uint pos_b = 0;
#else
    uint pos_b = (batch_idx * p.batch_stride_b + ic * BN * p.stride_b + start_k) / LOAD_VEC_B;
#endif

    float sums[WMITER * TM * WNITER * TN];
    FLOAT_TYPE cache_a[WMITER * TM];
    FLOAT_TYPE cache_b[WNITER * TN];

    [[unroll]] for (uint i = 0; i < WMITER*TM*WNITER*TN; i++) {
        sums[i] = 0.0f;
    }

    [[unroll]] for (uint block = start_k; block < end_k; block += BK) {
        [[unroll]] for (uint l = 0; l < BM; l += loadstride_a) {

#if defined(DATA_A_F32) || defined(DATA_A_F16)
#if LOAD_VEC_A == 8
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
            buf_a[buf_idx    ] = FLOAT_TYPE(data_a[idx][0].x);
            buf_a[buf_idx + 1] = FLOAT_TYPE(data_a[idx][0].y);
            buf_a[buf_idx + 2] = FLOAT_TYPE(data_a[idx][0].z);
            buf_a[buf_idx + 3] = FLOAT_TYPE(data_a[idx][0].w);
            buf_a[buf_idx + 4] = FLOAT_TYPE(data_a[idx][1].x);
            buf_a[buf_idx + 5] = FLOAT_TYPE(data_a[idx][1].y);
            buf_a[buf_idx + 6] = FLOAT_TYPE(data_a[idx][1].z);
            buf_a[buf_idx + 7] = FLOAT_TYPE(data_a[idx][1].w);
#elif LOAD_VEC_A == 4
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
            buf_a[buf_idx    ] = FLOAT_TYPE(data_a[idx].x);
            buf_a[buf_idx + 1] = FLOAT_TYPE(data_a[idx].y);
            buf_a[buf_idx + 2] = FLOAT_TYPE(data_a[idx].z);
            buf_a[buf_idx + 3] = FLOAT_TYPE(data_a[idx].w);
#else
            if (ir * BM + loadc_a + l < p.M && block + loadr_a < end_k) {
                buf_a[(loadc_a + l) * (BK+1) + loadr_a] = FLOAT_TYPE(data_a[pos_a + (loadc_a + l) * p.stride_a + loadr_a]);
            } else {
                buf_a[(loadc_a + l) * (BK+1) + loadr_a] = FLOAT_TYPE(0.0f);
            }
#endif
#elif defined(DATA_A_Q4_0)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;

            const uint ib = idx / 16;
            const uint iqs = idx & 0xF;

            const float d = float(data_a[ib].d);
            const uint vui = uint(data_a[ib].qs[iqs]);
            const vec2 v = (vec2(vui & 0xF, vui >> 4) - 8.0f) * d;

            buf_a[buf_idx     ] = FLOAT_TYPE(v.x);
            buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
#elif defined(DATA_A_Q4_1)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;

            const uint ib = idx / 16;
            const uint iqs = idx & 0xF;

            const float d = float(data_a[ib].d);
            const float m = float(data_a[ib].m);
            const uint vui = uint(data_a[ib].qs[iqs]);
            const vec2 v = vec2(vui & 0xF, vui >> 4) * d + m;

            buf_a[buf_idx     ] = FLOAT_TYPE(v.x);
            buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
#elif defined(DATA_A_Q5_0)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;

            const uint ib = idx / 16;
            const uint iqs = idx & 0xF;

            const float d = float(data_a[ib].d);
            const uint uint_qh = uint(data_a[ib].qh[1]) << 16 | data_a[ib].qh[0];
            const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
            const uint vui = uint(data_a[ib].qs[iqs]);
            const vec2 v = (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f) * d;

            buf_a[buf_idx     ] = FLOAT_TYPE(v.x);
            buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
#elif defined(DATA_A_Q5_1)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;

            const uint ib = idx / 16;
            const uint iqs = idx & 0xF;

            const float d = float(data_a[ib].d);
            const float m = float(data_a[ib].m);
            const uint uint_qh = data_a[ib].qh;
            const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
            const uint vui = uint(data_a[ib].qs[iqs]);
            const vec2 v = vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) * d + m;

            buf_a[buf_idx     ] = FLOAT_TYPE(v.x);
            buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
#elif defined(DATA_A_Q8_0)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;

            const uint ib = idx / 16;
            const uint iqs = (idx & 0xF) * 2;

            const float d = float(data_a[ib].d);
            const vec2 v = vec2(int(data_a[ib].qs[iqs]), int(data_a[ib].qs[iqs + 1])) * d;

            buf_a[buf_idx    ] = FLOAT_TYPE(v.x);
            buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
#elif defined(DATA_A_Q2_K)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;

            const uint ib = idx / 128;                         // 2 values per idx
            const uint iqs = idx % 128;                        // 0..127

            const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30
            const uint scalesi = iqs / 8;                      // 0..15
            const uint qsshift = ((iqs % 64) / 16) * 2;        // 0,2,4,6

            const uvec2 qs = uvec2(data_a[ib].qs[qsi], data_a[ib].qs[qsi + 1]);
            const uint scales = data_a[ib].scales[scalesi];
            const vec2 d = vec2(data_a[ib].d);

            const vec2 v = d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);

            buf_a[buf_idx    ] = FLOAT_TYPE(v.x);
            buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
#elif defined(DATA_A_Q3_K)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;

            const uint ib = idx / 128;                   // 2 values per idx
            const uint iqs = idx % 128;                  // 0..127

            const uint n = iqs / 64;                     // 0,1
            const uint qsi = n * 32 + (iqs % 16) * 2;    // 0,2,4..62
            const uint hmi =          (iqs % 16) * 2;    // 0,2,4..30
            const uint j = (iqs % 64) / 4;               // 0..3
            const uint is = iqs / 8;                     // 0..15
            const uint halfsplit = ((iqs % 64) / 16);    // 0,1,2,3
            const uint qsshift = halfsplit * 2;          // 0,2,4,6
            const uint m = 1 << (4 * n + halfsplit);     // 1,2,4,8,16,32,64,128

            const int8_t us = int8_t(is <  4 ? (data_a[ib].scales[is-0] & 0xF) | (((data_a[ib].scales[is+8] >> 0) & 3) << 4) :
                              is <  8 ? (data_a[ib].scales[is-0] & 0xF) | (((data_a[ib].scales[is+4] >> 2) & 3) << 4) :
                              is < 12 ? (data_a[ib].scales[is-8] >>  4) | (((data_a[ib].scales[is+0] >> 4) & 3) << 4) :
                                        (data_a[ib].scales[is-8] >>  4) | (((data_a[ib].scales[is-4] >> 6) & 3) << 4));
            const float dl = float(data_a[ib].d) * float(us - 32);

            buf_a[buf_idx    ] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi    ] >> qsshift) & 3) - (((data_a[ib].hmask[hmi    ] & m) != 0) ? 0 : 4)));
            buf_a[buf_idx + 1] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
#elif defined(DATA_A_Q4_K)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;

            const uint ib = idx / 128;                 // 2 values per idx
            const uint iqs = idx % 128;                // 0..127

            const uint n = iqs / 32;                   // 0,1,2,3
            const uint b = (iqs % 32) / 16;            // 0,1
            const uint is = 2 * n + b;                 // 0..7
            const uint qsi = n * 32 + (iqs % 16) * 2;  // 0,2,4..126

            const vec2 loadd = vec2(data_a[ib].d);

            uint8_t sc;
            uint8_t mbyte;
            if (is < 4) {
                sc    = uint8_t(data_a[ib].scales[is    ] & 63);
                mbyte = uint8_t(data_a[ib].scales[is + 4] & 63);
            } else {
                sc    = uint8_t((data_a[ib].scales[is + 4] & 0xF) | ((data_a[ib].scales[is - 4] >> 6) << 4));
                mbyte = uint8_t((data_a[ib].scales[is + 4] >>  4) | ((data_a[ib].scales[is    ] >> 6) << 4));
            }
            const float d = loadd.x * sc;
            const float m = loadd.y * mbyte;

            buf_a[buf_idx    ] = FLOAT_TYPE(d * float((data_a[ib].qs[qsi    ] >> (b * 4)) & 0xF) - m);
            buf_a[buf_idx + 1] = FLOAT_TYPE(d * float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) - m);
#elif defined(DATA_A_Q5_K)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;

            const uint ib = idx / 128;                 // 2 values per idx
            const uint iqs = idx % 128;                // 0..127

            const uint n = iqs / 32;                   // 0,1,2,3
            const uint b = (iqs % 32) / 16;            // 0,1
            const uint is = 2 * n + b;                 // 0..7
            const uint qsi = n * 32 + (iqs % 16) * 2;  // 0,2,4..126
            const uint qhi = (iqs % 16) * 2;           // 0,2,4..30

            const uint8_t hm = uint8_t(1 << (iqs / 16));

            const vec2 loadd = vec2(data_a[ib].d);

            uint8_t sc;
            uint8_t mbyte;
            if (is < 4) {
                sc    = uint8_t(data_a[ib].scales[is    ] & 63);
                mbyte = uint8_t(data_a[ib].scales[is + 4] & 63);
            } else {
                sc    = uint8_t((data_a[ib].scales[is + 4] & 0xF) | ((data_a[ib].scales[is - 4] >> 6) << 4));
                mbyte = uint8_t((data_a[ib].scales[is + 4] >>  4) | ((data_a[ib].scales[is    ] >> 6) << 4));
            }
            const float d = loadd.x * sc;
            const float m = loadd.y * mbyte;

            buf_a[buf_idx    ] = FLOAT_TYPE(d * (float((data_a[ib].qs[qsi    ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi    ] & hm) != 0 ? 16 : 0)) - m);
            buf_a[buf_idx + 1] = FLOAT_TYPE(d * (float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0)) - m);
#elif defined(DATA_A_Q6_K)
            const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;

            const uint ib = idx / 128;                  // 2 values per idx
            const uint iqs = idx % 128;                 // 0..127

            const uint n = iqs / 64;                    // 0,1
            const uint b = (iqs % 64) / 32;             // 0,1
            const uint is_b = (iqs % 16) / 8;           // 0,1
            const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
            const uint is = 8 * n + qhshift + is_b;     // 0..15
            const uint qsi = n * 64 + (iqs % 32) * 2;   // 0,2,4..126
            const uint qhi = n * 32 + (iqs % 16) * 2;   // 0,2,4..62

            const float dscale = float(data_a[ib].d) * float(data_a[ib].scales[is]);

            buf_a[buf_idx    ] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi    ] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi    ] >> qhshift) & 3) << 4)) - 32));
            buf_a[buf_idx + 1] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
#endif
        }
        [[unroll]] for (uint l = 0; l < BN; l += loadstride_b) {
#if LOAD_VEC_B == 8
#ifdef MUL_MAT_ID
            const u16vec2 row_idx = row_ids[ic * BN + loadc_b + l];
            const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + loadr_b;
#else
            const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
#endif
            const uint buf_idx = (loadc_b + l) * (BK+1) + loadr_b * LOAD_VEC_B;
            buf_b[buf_idx + 0] = FLOAT_TYPE(data_b[idx][0].x);
            buf_b[buf_idx + 1] = FLOAT_TYPE(data_b[idx][0].y);
            buf_b[buf_idx + 2] = FLOAT_TYPE(data_b[idx][0].z);
            buf_b[buf_idx + 3] = FLOAT_TYPE(data_b[idx][0].w);
            buf_b[buf_idx + 4] = FLOAT_TYPE(data_b[idx][1].x);
            buf_b[buf_idx + 5] = FLOAT_TYPE(data_b[idx][1].y);
            buf_b[buf_idx + 6] = FLOAT_TYPE(data_b[idx][1].z);
            buf_b[buf_idx + 7] = FLOAT_TYPE(data_b[idx][1].w);
#elif LOAD_VEC_B == 4
#ifdef MUL_MAT_ID
            const u16vec2 row_idx = row_ids[ic * BN + loadc_b + l];
            const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + loadr_b;
#else
            const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
#endif
            const uint buf_idx = (loadc_b + l) * (BK+1) + loadr_b * LOAD_VEC_B;
            buf_b[buf_idx + 0] = FLOAT_TYPE(data_b[idx].x);
            buf_b[buf_idx + 1] = FLOAT_TYPE(data_b[idx].y);
            buf_b[buf_idx + 2] = FLOAT_TYPE(data_b[idx].z);
            buf_b[buf_idx + 3] = FLOAT_TYPE(data_b[idx].w);
#elif !MUL_MAT_ID
            if (ic * BN + loadc_b + l < p.N && block + loadr_b < end_k) {
                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(data_b[pos_b + (loadc_b + l) * p.stride_b + loadr_b]);
            } else {
                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(0.0f);
            }
#else
            const uint row_i = ic * BN + loadc_b + l;
            if (row_i < _ne1) {
                const u16vec2 row_idx = row_ids[row_i];
                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(data_b[pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + loadr_b]);
            } else {
                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(0.0f);
            }
#endif
        }

        barrier();

        pos_a += BK / LOAD_VEC_A;
        pos_b += BK / LOAD_VEC_B;

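        // Multiply the staged tiles: stream BK slices of A and B from shared
        // memory into registers and accumulate TM x TN outer products per thread.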
        for (uint i = 0; i < BK; i++) {
            // Load from shared into cache
            [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
                [[unroll]] for (uint j = 0; j < TM; j++) {
                    cache_a[wsir * TM + j] = buf_a[(warp_r * WM + wsir * WSUBM + tiwr * TM + j) * (BK+1) + i];
                }
            }
            [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
                [[unroll]] for (uint j = 0; j < TN; j++) {
                    cache_b[wsic * TN + j] = buf_b[(warp_c * WN + wsic * WSUBN + tiwc * TN + j) * (BK+1) + i];
                }
            }

            [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
                [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
                    [[unroll]] for (uint cc = 0; cc < TN; cc++) {
                        [[unroll]] for (uint cr = 0; cr < TM; cr++) {
                            sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr] += float(cache_a[wsir * TM + cr]) * float(cache_b[wsic * TN + cc]);
                        }
                    }
                }
            }
        }

        barrier();
    }

    const uint dr = ir * BM + warp_r * WM;
    const uint dc = ic * BN + warp_c * WN;

#ifndef MUL_MAT_ID
    const uint offsets = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z;
#endif

    [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
        [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {

            const uint dr_warp = dr + wsir * WSUBM + tiwr * TM;
            const uint dc_warp = dc + wsic * WSUBN + tiwc * TN;
            [[unroll]] for (uint cc = 0; cc < TN; cc++) {
#ifdef MUL_MAT_ID
                const uint row_i = dc_warp + cc;
                if (row_i >= _ne1) break;

                const u16vec2 row_idx = row_ids[row_i];
#endif
                [[unroll]] for (uint cr = 0; cr < TM; cr++) {
#ifdef MUL_MAT_ID
                    data_d[row_idx.y * p.batch_stride_d + row_idx.x * p.stride_d + dr_warp + cr] = D_TYPE(sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr]);
#else
                    if (dr_warp + cr < p.M && dc_warp + cc < p.N) {
                        data_d[offsets + (dc_warp + cc) * p.stride_d + dr_warp + cr] = D_TYPE(sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr]);
                    }
#endif
                }
            }
        }
    }
}

44  vulkan-shaders/norm.comp  Normal file
@@ -0,0 +1,44 @@
#version 450

#include "generic_head.comp"
#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512

layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

shared vec2 sum[BLOCK_SIZE];

void main() {
    const uint row = gl_WorkGroupID.x;
    const uint tid = gl_LocalInvocationID.x;

    sum[tid] = vec2(0.0f, 0.0f);

    [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
        const float xi = float(data_a[row*p.KX + col]);
        sum[tid].x += xi;
        sum[tid].y += xi * xi;
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sum[tid] += sum[tid + s];
        }
        barrier();
    }

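    // sum[0].x now holds the row's sum of x and sum[0].y its sum of x²; the
    // variance is E[x²] − E[x]², with p.param1 as the eps inside the inverse sqrt.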
    const float mean = sum[0].x / p.KX;
    const float var = sum[0].y / p.KX - mean * mean;
    const float inv_std = inversesqrt(var + p.param1);

    [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
        data_d[row*p.KX + col] = D_TYPE((float(data_a[row*p.KX + col]) - mean) * inv_std);
    }
}

21  vulkan-shaders/relu.comp  Normal file
@@ -0,0 +1,21 @@
#version 450

#include "generic_head.comp"
#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable

layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

void main() {
    const uint i = gl_GlobalInvocationID.x;

    if (i >= p.KX) {
        return;
    }

    data_d[i] = max(float(data_a[i]), 0);
}

42  vulkan-shaders/rms_norm.comp  Normal file
@@ -0,0 +1,42 @@
#version 450

#include "generic_head.comp"
#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512

layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

shared FLOAT_TYPE sum[BLOCK_SIZE];

void main() {
    const uint row = gl_WorkGroupID.x;
    const uint tid = gl_LocalInvocationID.x;

    sum[tid] = FLOAT_TYPE(0.0f); // partial sum for thread in warp

    [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
        const FLOAT_TYPE xi = FLOAT_TYPE(data_a[row*p.KX + col]);
        sum[tid] += xi * xi;
    }

    // sum up partial sums and write back result
    barrier();
    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sum[tid] += sum[tid + s];
        }
        barrier();
    }

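    // RMS norm scales each element by 1 / sqrt(mean(x²) + eps); unlike norm.comp
    // it does not subtract the mean, normalizing only the root-mean-square magnitude.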
    const FLOAT_TYPE mean = sum[0] / FLOAT_TYPE(p.KX);
    const FLOAT_TYPE scale = inversesqrt(mean + FLOAT_TYPE(p.param1));

    [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
        data_d[row*p.KX + col] = D_TYPE(scale * FLOAT_TYPE(data_a[row*p.KX + col]));
    }
}

44  vulkan-shaders/rope_head.comp  Normal file
@@ -0,0 +1,44 @@
#include "types.comp"

#extension GL_EXT_shader_16bit_storage : require

layout(local_size_x = 1, local_size_y = 256, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) readonly buffer Y {int data_pos[];};
layout (binding = 2) readonly buffer Z {float data_ff[];};
layout (binding = 3) writeonly buffer D {D_TYPE data_d[];};

layout (push_constant) uniform parameter {
    uint ncols;
    uint n_dims;
    float freq_scale;
    uint p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[2];
    float theta_scale;
    uint has_ff;
} p;

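// YaRN-style RoPE scaling: rope_yarn blends the interpolated angle
// (freq_scale * theta) with the extrapolated one per dimension, using a linear
// ramp over corr_dims, and boosts the magnitude when freq_scale < 1.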
float rope_yarn_ramp(const float low, const float high, const uint i0) {
    const float y = (i0 / 2 - low) / max(0.001f, high - low);
    return 1.0f - min(1.0f, max(0.0f, y));
}

void rope_yarn(const float theta_extrap, const uint i0, out float cos_theta, out float sin_theta) {
    float mscale = p.attn_factor;
    // Get n-d rotational scaling corrected for extrapolation
    float theta_interp = p.freq_scale * theta_extrap;
    float theta = theta_interp;
    if (p.ext_factor != 0.0f) {
        float ramp_mix = rope_yarn_ramp(p.corr_dims[0], p.corr_dims[1], i0) * p.ext_factor;
        theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;

        // Get n-d magnitude scaling corrected for interpolation
        mscale *= 1.0f + 0.1f * log(1.0f / p.freq_scale);
    }
    cos_theta = cos(theta) * mscale;
    sin_theta = sin(theta) * mscale;
}

37  vulkan-shaders/rope_neox.comp  Normal file
@@ -0,0 +1,37 @@
#version 450

#include "rope_head.comp"

void main() {
    const uint col = gl_GlobalInvocationID.y * 2;
    const uint row = gl_GlobalInvocationID.x;

    if (col >= p.ncols) {
        return;
    }

    if (col >= p.n_dims) {
        const uint i = row*p.ncols + col;

        data_d[i + 0] = data_a[i + 0];
        data_d[i + 1] = data_a[i + 1];

        return;
    }

    const uint i  = row*p.ncols + col/2;
    const uint i2 = row/p.p_delta_rows;

    const float theta_base = data_pos[i2] * pow(p.theta_scale, col/2.0f);

    const float freq_factor = p.has_ff != 0 ? data_ff[col/2] : 1.0f;

    float cos_theta, sin_theta;
    rope_yarn(theta_base / freq_factor, col, cos_theta, sin_theta);

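    // NeoX-style RoPE rotates the pair (i, i + n_dims/2) rather than adjacent
    // elements; rope_norm.comp below rotates the pair (i, i + 1) instead.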
    const float x0 = float(data_a[i + 0]);
    const float x1 = float(data_a[i + p.n_dims/2]);

    data_d[i + 0]          = D_TYPE(x0*cos_theta - x1*sin_theta);
    data_d[i + p.n_dims/2] = D_TYPE(x0*sin_theta + x1*cos_theta);
}

37  vulkan-shaders/rope_norm.comp  Normal file
@@ -0,0 +1,37 @@
#version 450

#include "rope_head.comp"

void main() {
    const uint col = gl_GlobalInvocationID.y * 2;
    const uint row = gl_GlobalInvocationID.x;

    if (col >= p.ncols) {
        return;
    }

    if (col >= p.n_dims) {
        const uint i = row*p.ncols + col;

        data_d[i + 0] = data_a[i + 0];
        data_d[i + 1] = data_a[i + 1];

        return;
    }

    const uint i  = row*p.ncols + col;
    const uint i2 = row/p.p_delta_rows;

    const float theta_base = data_pos[i2] * pow(p.theta_scale, col/2.0f);

    const float freq_factor = p.has_ff != 0 ? data_ff[col/2] : 1.0f;

    float cos_theta, sin_theta;
    rope_yarn(theta_base / freq_factor, col, cos_theta, sin_theta);

    const float x0 = float(data_a[i + 0]);
    const float x1 = float(data_a[i + 1]);

    data_d[i + 0] = D_TYPE(x0*cos_theta - x1*sin_theta);
    data_d[i + 1] = D_TYPE(x0*sin_theta + x1*cos_theta);
}

12  vulkan-shaders/scale.comp  Normal file
@@ -0,0 +1,12 @@
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

void main() {
    if (gl_GlobalInvocationID.x >= p.ne) {
        return;
    }

    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(gl_GlobalInvocationID.x)]) * FLOAT_TYPE(p.param1));
}

22  vulkan-shaders/silu.comp  Normal file
@@ -0,0 +1,22 @@
#version 450

#include "generic_head.comp"
#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable

layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

void main() {
    const uint i = gl_GlobalInvocationID.x;

    if (i >= p.KX) {
        return;
    }

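    // SiLU (swish) activation: x * sigmoid(x) = x / (1 + exp(-x)).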
    const float xi = float(data_a[i]);
    data_d[i] = D_TYPE(xi / (1.0f + exp(-xi)));
}

106  vulkan-shaders/soft_max.comp  Normal file
@@ -0,0 +1,106 @@
#version 450

#extension GL_EXT_shader_16bit_storage : require

layout (push_constant) uniform parameter
{
    uint KX;
    uint KY;
    float scale;
    float max_bias;
    float m0;
    float m1;
    uint n_head_log2;
} p;

#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512

layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
layout (binding = 1) readonly buffer Y {B_TYPE data_b[];};
layout (binding = 2) buffer D {D_TYPE data_d[];};

shared FLOAT_TYPE vals[BLOCK_SIZE];

void main() {
    const uint tid = gl_LocalInvocationID.x;
    const uint rowx = gl_WorkGroupID.x;
    const uint rowy = rowx % p.KY;

    float slope = 1.0f;

    // ALiBi
    if (p.max_bias > 0.0f) {
        const uint h = rowx/p.KY; // head index

        const float base = h < p.n_head_log2 ? p.m0 : p.m1;
        const uint   exp = h < p.n_head_log2 ? h + 1 : 2*(h - p.n_head_log2) + 1;

        slope = pow(base, exp);
    }

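    // Numerically stable softmax in three passes over the row: find the max,
    // accumulate exp(x*scale + bias - max) while storing the exponentials,
    // then divide everything by the sum.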
    // Find max
    FLOAT_TYPE max_val = uintBitsToFloat(0xFF800000);

    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
        const uint col = col0 + tid;

        if (col >= p.KX) {
            break;
        }

        max_val = max(max_val, FLOAT_TYPE(data_a[rowx * p.KX + col]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)));
    }
    vals[tid] = max_val;

    barrier();
    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) {
            vals[tid] = max(vals[tid], vals[tid + s]);
        }
        barrier();
    }

    max_val = vals[0];
    barrier();

    // Sum up values
    vals[tid] = FLOAT_TYPE(0.0f);

    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
        const uint col = col0 + tid;

        if (col >= p.KX) {
            break;
        }

        const uint i = rowx * p.KX + col;
        const FLOAT_TYPE val = exp(FLOAT_TYPE(data_a[i]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)) - max_val);
        vals[tid] += val;
        data_d[i] = D_TYPE(val);
    }

    barrier();
    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) {
            vals[tid] += vals[tid + s];
        }
        barrier();
    }

    const D_TYPE divisor = D_TYPE(vals[0]);

    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
        const uint col = col0 + tid;

        if (col >= p.KX) {
            break;
        }

        data_d[rowx*p.KX + col] /= divisor;
    }
}

13  vulkan-shaders/square.comp  Normal file
@@ -0,0 +1,13 @@
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

void main() {
    if (gl_GlobalInvocationID.x >= p.ne) {
        return;
    }

    const FLOAT_TYPE val = FLOAT_TYPE(data_a[src0_idx(gl_GlobalInvocationID.x)]);
    data_d[p.d_offset + dst_idx(gl_GlobalInvocationID.x)] = D_TYPE(val * val);
}

37  vulkan-shaders/sum_rows.comp  Normal file
@@ -0,0 +1,37 @@
#version 450

#include "generic_head.comp"
#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

layout (constant_id = 0) const uint BLOCK_SIZE = 32;

shared FLOAT_TYPE tmp[BLOCK_SIZE];

void main() {
    const uint row = gl_WorkGroupID.x;
    const uint col = gl_LocalInvocationID.x;

    tmp[col] = FLOAT_TYPE(0.0f);

    for (uint i = col; i < p.KX; i += BLOCK_SIZE) {
        tmp[col] += FLOAT_TYPE(data_a[row*p.KX + i]);
    }

    barrier();
    [[unroll]] for (int s = int(BLOCK_SIZE) / 2; s > 0; s >>= 1) {
        if (col < s) {
            tmp[col] += tmp[col + s];
        }
        barrier();
    }

    if (col == 0) {
        data_d[row] = D_TYPE(tmp[0]);
    }
}

179  vulkan-shaders/types.comp  Normal file
@@ -0,0 +1,179 @@
#if !defined(DATA_A_F32) && !defined(DATA_A_F16)
#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
#endif

#if defined(DATA_A_F32)
#define QUANT_K 1
#define QUANT_R 1

#ifndef LOAD_VEC_A
#define A_TYPE float
#elif LOAD_VEC_A == 4
#define A_TYPE vec4
#elif LOAD_VEC_A == 8
#define A_TYPE mat2x4
#endif
#endif

#if defined(DATA_A_F16)
#define QUANT_K 1
#define QUANT_R 1

#ifndef LOAD_VEC_A
#define A_TYPE float16_t
#elif LOAD_VEC_A == 4
#define A_TYPE f16vec4
#elif LOAD_VEC_A == 8
#define A_TYPE f16mat2x4
#endif
#endif

#if defined(DATA_A_Q4_0)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 32
#define QUANT_R 2

struct block_q4_0
{
    float16_t d;
    uint8_t qs[16];
};

#define A_TYPE block_q4_0
#endif

#if defined(DATA_A_Q4_1)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 32
#define QUANT_R 2

struct block_q4_1
{
    float16_t d;
    float16_t m;
    uint8_t qs[16];
};

#define A_TYPE block_q4_1
#endif

#if defined(DATA_A_Q5_0)
#extension GL_EXT_shader_16bit_storage : require
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
#define QUANT_K 32
#define QUANT_R 2

struct block_q5_0
{
    float16_t d;
    uint16_t qh[2];
    uint8_t qs[16];
};

#define A_TYPE block_q5_0
#endif

#if defined(DATA_A_Q5_1)
#extension GL_EXT_shader_16bit_storage : require
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
#define QUANT_K 32
#define QUANT_R 2

struct block_q5_1
{
    float16_t d;
    float16_t m;
    uint qh;
    uint8_t qs[16];
};

#define A_TYPE block_q5_1
#endif

#if defined(DATA_A_Q8_0)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 32
#define QUANT_R 1

struct block_q8_0
{
    float16_t d;
    int8_t qs[32];
};

#define A_TYPE block_q8_0
#endif

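// The K-quants below use 256-element super-blocks: each block packs sub-block
// scales (and mins, where applicable) together with the quantized weights, so
// the metadata is amortized over many more values than the 32-element blocks above.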
// K-quants
#if defined(DATA_A_Q2_K)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 256

struct block_q2_K
{
    uint8_t scales[QUANT_K/16];
    uint8_t qs[QUANT_K/4];
    f16vec2 d;
};

#define A_TYPE block_q2_K
#endif

#if defined(DATA_A_Q3_K)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 256

struct block_q3_K
{
    uint8_t hmask[QUANT_K/8];
    uint8_t qs[QUANT_K/4];
    uint8_t scales[12];
    float16_t d;
};

#define A_TYPE block_q3_K
#endif

#if defined(DATA_A_Q4_K)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 256

struct block_q4_K
{
    f16vec2 d;
    uint8_t scales[3*QUANT_K/64];
    uint8_t qs[QUANT_K/2];
};

#define A_TYPE block_q4_K
#endif

#if defined(DATA_A_Q5_K)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 256

struct block_q5_K
{
    f16vec2 d;
    uint8_t scales[12];
    uint8_t qh[QUANT_K/8];
    uint8_t qs[QUANT_K/2];
};

#define A_TYPE block_q5_K
#endif

#if defined(DATA_A_Q6_K)
#extension GL_EXT_shader_16bit_storage : require
#define QUANT_K 256

struct block_q6_K
{
    uint8_t ql[QUANT_K/2];
    uint8_t qh[QUANT_K/4];
    int8_t scales[QUANT_K/16];
    float16_t d;
};

#define A_TYPE block_q6_K
#endif