commit b805ec2899
Merge branch 'load-parallel-prompt-file' of https://github.com/pudepiedj/llama.cpp into load-parallel-prompt-file

15 changed files with 1054 additions and 189 deletions

.github/workflows/build.yml (16 changes)

@@ -188,7 +188,7 @@ jobs:
           sysctl -a
           mkdir build
           cd build
-          cmake -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF ..
+          cmake ..
           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)

       - name: Test
@@ -265,17 +265,17 @@ jobs:
       matrix:
         include:
           - build: 'noavx'
-            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DBUILD_SHARED_LIBS=ON'
+            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DBUILD_SHARED_LIBS=ON'
           - build: 'avx2'
-            defines: '-DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'avx'
-            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
+            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
           - build: 'avx512'
-            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'clblast'
-            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_CLBLAST=ON -DBUILD_SHARED_LIBS=ON -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/clblast"'
+            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CLBLAST=ON -DBUILD_SHARED_LIBS=ON -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/clblast"'
           - build: 'openblas'
-            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
+            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'

     steps:
       - name: Clone
@@ -414,7 +414,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake .. -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUBLAS=ON -DBUILD_SHARED_LIBS=ON
+          cmake .. -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUBLAS=ON -DBUILD_SHARED_LIBS=ON
          cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS}

      - name: Determine tag name

@@ -44,7 +44,7 @@ endif()

 # general
 option(LLAMA_STATIC                "llama: static link libraries"      OFF)
-option(LLAMA_NATIVE                "llama: enable -march=native flag"  OFF)
+option(LLAMA_NATIVE                "llama: enable -march=native flag"  ON)
 option(LLAMA_LTO                   "llama: enable link time optimization" OFF)

 # debug
@@ -58,15 +58,21 @@ option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer"
 option(LLAMA_SANITIZE_UNDEFINED    "llama: enable undefined sanitizer" OFF)

 # instruction set specific
-option(LLAMA_AVX                   "llama: enable AVX"                 ON)
-option(LLAMA_AVX2                  "llama: enable AVX2"                ON)
+if (LLAMA_NATIVE)
+    set(INS_ENB OFF)
+else()
+    set(INS_ENB ON)
+endif()
+
+option(LLAMA_AVX                   "llama: enable AVX"                 ${INS_ENB})
+option(LLAMA_AVX2                  "llama: enable AVX2"                ${INS_ENB})
 option(LLAMA_AVX512                "llama: enable AVX512"              OFF)
 option(LLAMA_AVX512_VBMI           "llama: enable AVX512-VBMI"         OFF)
 option(LLAMA_AVX512_VNNI           "llama: enable AVX512-VNNI"         OFF)
-option(LLAMA_FMA                   "llama: enable FMA"                 ON)
+option(LLAMA_FMA                   "llama: enable FMA"                 ${INS_ENB})
 # in MSVC F16C is implied with AVX2/AVX512
 if (NOT MSVC)
-    option(LLAMA_F16C              "llama: enable F16C"                ON)
+    option(LLAMA_F16C              "llama: enable F16C"                ${INS_ENB})
 endif()

 # 3rd party libs
@@ -504,9 +510,6 @@ if (NOT MSVC)
     if (LLAMA_GPROF)
         add_compile_options(-pg)
     endif()
-    if (LLAMA_NATIVE)
-        add_compile_options(-march=native)
-    endif()
 endif()

 if ((${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm") OR (${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") OR ("${CMAKE_GENERATOR_PLATFORM_LWR}" MATCHES "arm64"))
@@ -561,6 +564,9 @@ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$" OR "${CMAKE_GE
         add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX>)
     endif()
 else()
+    if (LLAMA_NATIVE)
+        add_compile_options(-march=native)
+    endif()
     if (LLAMA_F16C)
         add_compile_options(-mf16c)
     endif()

@@ -9,7 +9,7 @@ if [[ -z "${PROMPT_CACHE_FILE+x}" || -z "${CHAT_SAVE_DIR+x}" ]]; then
     exit 1
 fi

-MODEL="${MODEL:-./models/13B/ggml-model-q4_0.bin}"
+MODEL="${MODEL:-./models/llama-13b/ggml-model-q4_0.gguf}"
 PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}"
 USER_NAME="${USER_NAME:-User}"
 AI_NAME="${AI_NAME:-ChatLLaMa}"
@@ -61,9 +61,9 @@ fi

 if [[ ! -e "$PROMPT_CACHE_FILE" ]]; then
     echo 'Prompt cache does not exist, building...'
-    # Default batch_size to 8 here for better user feedback during initial prompt processing
+    # Default batch_size to 64 here for better user feedback during initial prompt processing
     ./main 2>>"$LOG" \
-        --batch_size 8 \
+        --batch_size 64 \
         "${OPTS[@]}" \
         --prompt-cache "$PROMPT_CACHE_FILE" \
         --file "$CUR_PROMPT_FILE" \
@@ -132,7 +132,7 @@ while read -e line; do
    # HACK get num tokens from debug message
    # TODO get both messages in one go
    if  ! session_size_msg="$(tail -n30 "$LOG" | grep -oE "$SESSION_SIZE_MSG_PATTERN")" ||
-       ! sample_time_msg="$( tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then
+       ! sample_time_msg="$(tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then
        echo >&2 "Couldn't get number of tokens from ./main output!"
        exit 1
    fi

@@ -61,7 +61,7 @@ For example to apply 40% of the 'shakespeare' LORA adapter, 80% of the 'bible' L
   --lora lora-open-llama-3b-v2-q8_0-yet-another-one-LATEST.bin
 ```

-The scale numbers don't need to add up to one, and you can also use numbers creater than 1 to further increase the influence of an adapter. But making the values to big will sometimes result in worse output. Play around to find good values.
+The scale numbers don't need to add up to one, and you can also use numbers greater than 1 to further increase the influence of an adapter. But making the values to big will sometimes result in worse output. Play around to find good values.

 Gradient checkpointing reduces the memory requirements by ~50% but increases the runtime.
 If you have enough RAM, you can make finetuning a bit faster by disabling checkpointing with `--no-checkpointing`.

@@ -543,6 +543,9 @@ int main(int argc, char ** argv) {
                 if (i > 0) {
                     embd.erase(embd.begin(), embd.begin() + i);
                 }
+
+                // remove any "future" tokens that we might have inherited from the session from the KV cache
+                llama_kv_cache_tokens_rm(ctx, n_past, -1);
             }

             // evaluate tokens in batches
@@ -667,7 +670,7 @@ int main(int argc, char ** argv) {
             }
             fflush(stdout);
         }
-        // reset color to default if we there is no pending user input
+        // reset color to default if there is no pending user input
         if (input_echo && (int) embd_inp.size() == n_consumed) {
             console::set_display(console::reset);
         }
@@ -694,10 +697,8 @@ int main(int argc, char ** argv) {
                 if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
                     if (params.interactive) {
                         is_interacting = true;
-                        console::set_display(console::user_input);
                     }
                     is_antiprompt = true;
-                    fflush(stdout);
                     break;
                 }
             }
@@ -721,8 +722,6 @@ int main(int argc, char ** argv) {

                 is_interacting = true;
                 printf("\n");
-                console::set_display(console::user_input);
-                fflush(stdout);
             } else if (params.instruct) {
                 is_interacting = true;
             }
@@ -747,6 +746,9 @@ int main(int argc, char ** argv) {
                 printf("%s", buffer.c_str());
             }

+            // color user input only
+            console::set_display(console::user_input);
+
             std::string line;
             bool another_line = true;
             do {

@@ -370,7 +370,7 @@ int main(int argc, char ** argv) {
            }

            // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-           llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, n_ctx);
+           llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, -1);

            const auto t_main_end = ggml_time_us();

@@ -448,7 +448,7 @@ struct llama_server_context
            n_past = common_part(embd, prompt_tokens);

            // since #3228 we now have to manually manage the KV cache
-           llama_kv_cache_seq_rm(ctx, 0, n_past, params.n_ctx);
+           llama_kv_cache_seq_rm(ctx, 0, n_past, -1);

            embd = prompt_tokens;
            if (n_past == num_prompt_tokens)
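
Note: the `llama_kv_cache_seq_rm` call-site changes in the surrounding hunks all follow the same pattern — keep the first `n_past` cached positions of a sequence and clear everything after them before decoding. A minimal sketch of that pattern, assuming the llama.cpp API of this revision, where a negative end position is treated as "to the end of the cache":

```c
#include "llama.h"

// Sketch only: drop every cached position of sequence 0 from n_past onward,
// so the prompt prefix is reused and stale "future" tokens cannot leak in.
static void trim_kv_cache_after(struct llama_context * ctx, int n_past) {
    // removes positions [n_past, end) for sequence 0; -1 means "up to the end"
    llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
}
```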

@@ -172,7 +172,7 @@ int main(int argc, char ** argv) {
            LOG("out of drafted tokens\n");
        }

-       llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, n_ctx);
+       llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1);
        llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0));
        ++n_past_dft;

@@ -257,7 +257,7 @@ int main(int argc, char ** argv) {
            }

            // evaluate the drafted token on the draft model
-           llama_kv_cache_seq_rm(ctx_dft, 0, n_past_cur, n_ctx);
+           llama_kv_cache_seq_rm(ctx_dft, 0, n_past_cur, -1);
            llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0));
            ++n_past_cur;

@@ -267,7 +267,7 @@ int main(int argc, char ** argv) {
        }

        // evaluate the target model on the drafted tokens
-       llama_kv_cache_seq_rm(ctx_tgt, 0, n_past_tgt, n_ctx);
+       llama_kv_cache_seq_rm(ctx_tgt, 0, n_past_tgt, -1);
        llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0));
        ++n_past_tgt;


@@ -62,7 +62,7 @@
          mkdir -p $out/include
          cp ${src}/llama.h $out/include/
        '';
-       cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" "-DLLAMA_MPI=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ];
+       cmakeFlags = [ "-DLLAMA_NATIVE=OFF" "-DLLAMA_BUILD_SERVER=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ];
      in
      {
        packages.default = pkgs.stdenv.mkDerivation {

@@ -1213,12 +1213,9 @@ void ggml_metal_graph_compute(
                        float max_bias;
                        memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

-                       if (__builtin_popcount(n_head) != 1) {
-                           GGML_ASSERT(false && "only power-of-two n_head implemented");
-                       }
-
                        const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
                        const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
+                       const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

                        [encoder setComputePipelineState:ctx->pipeline_alibi_f32];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
@@ -1240,6 +1237,8 @@ void ggml_metal_graph_compute(
                        [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
                        [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
                        [encoder setBytes:&m0  length:sizeof(   float) atIndex:18];
+                       [encoder setBytes:&m1  length:sizeof(   float) atIndex:19];
+                       [encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];

                        [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;

@@ -831,6 +831,8 @@ kernel void kernel_alibi_f32(
        constant   uint64_t & nb2,
        constant   uint64_t & nb3,
        constant      float & m0,
+       constant      float & m1,
+       constant        int & n_heads_log2_floor,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
@@ -846,7 +848,12 @@ kernel void kernel_alibi_f32(
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
-   float m_k = pow(m0, i2 + 1);
+   float m_k;
+   if (i2 < n_heads_log2_floor) {
+       m_k = pow(m0, i2 + 1);
+   } else {
+       m_k = pow(m1, 2 * (i2 - n_heads_log2_floor) + 1);
+   }
    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0] + m_k * (i00 - ne00 + 1);
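
A note on the math behind the kernel change above (same symbols as the code, with $k$ the head index and $n_{\text{floor}}$ = `n_heads_log2_floor`): the per-head ALiBi slope becomes

$$
m_0 = 2^{-\mathrm{max\_bias}/n_{\text{floor}}},\qquad
m_1 = 2^{-(\mathrm{max\_bias}/2)/n_{\text{floor}}},\qquad
m_k =
\begin{cases}
m_0^{\,k+1} & k < n_{\text{floor}}\\
m_1^{\,2(k-n_{\text{floor}})+1} & k \ge n_{\text{floor}}
\end{cases}
$$

so head counts that are not a power of two no longer need the assertion removed in the Metal backend.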

ggml.c (250 changes)

@@ -1272,6 +1272,33 @@ static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
 #endif
    }
+#elif defined(__riscv_v_intrinsic)
+
+    size_t vl = __riscv_vsetvl_e32m4(QK8_0);
+
+    for (int i = 0; i < nb; i++) {
+        // load elements
+        vfloat32m4_t v_x   = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
+
+        vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
+        vfloat32m1_t tmp   = __riscv_vfmv_v_f_f32m1(0.0f, vl);
+        vfloat32m1_t vmax  = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
+        float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
+
+        const float d = amax / ((1 << 7) - 1);
+        const float id = d ? 1.0f/d : 0.0f;
+
+        y[i].d = GGML_FP32_TO_FP16(d);
+
+        vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
+
+        // convert to integer
+        vint16m2_t   vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
+        vint8m1_t    vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
+
+        // store result
+        __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
+    }
 #else
    // scalar
    quantize_row_q8_0_reference(x, y, k);
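
For reference, the per-block Q8_0 quantization that the new RISC-V path above implements (mirroring the scalar `quantize_row_q8_0_reference`) can be sketched as

$$
d = \frac{\max_{0 \le j < \mathrm{QK8\_0}} |x_j|}{127}, \qquad q_j = \operatorname{round}\!\left(\frac{x_j}{d}\right),
$$

where $d$ is stored as FP16 in `y[i].d` and the $q_j$ are the int8 values written to `y[i].qs`.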

@@ -1490,6 +1517,41 @@ static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
 #endif
    }
+#elif defined(__riscv_v_intrinsic)
+
+    size_t vl = __riscv_vsetvl_e32m4(QK8_1);
+
+    for (int i = 0; i < nb; i++) {
+        // load elements
+        vfloat32m4_t v_x   = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
+
+        vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
+        vfloat32m1_t tmp   = __riscv_vfmv_v_f_f32m1(0.0, vl);
+        vfloat32m1_t vmax  = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
+        float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
+
+        const float d  = amax / ((1 << 7) - 1);
+        const float id = d ? 1.0f/d : 0.0f;
+
+        y[i].d = d;
+
+        vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
+
+        // convert to integer
+        vint16m2_t   vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
+        vint8m1_t    vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
+
+        // store result
+        __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
+
+        // compute sum for y[i].s
+        vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
+        vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
+
+        // set y[i].s
+        int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
+        y[i].s = sum*d;
+    }
 #else
    // scalar
    quantize_row_q8_1_reference(x, y, k);
@@ -2662,30 +2724,32 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void *
    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
-        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
+        // load elements
+        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

-        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
-        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
+        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

-        vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
-        vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
+        // mask and store lower part of x, and then upper part
+        vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+        vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

-        vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
-        vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
+        vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+        vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

-        vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl);
-        vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl);
+        // subtract offset
+        vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
+        vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);

-        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
-        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
+        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

-        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
-        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
+        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

-        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
-        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
+        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
    }
@@ -2823,27 +2887,28 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void *
    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
-        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
+        // load elements
+        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

-        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
-        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
+        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

-        vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
-        vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
+        // mask and store lower part of x, and then upper part
+        vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+        vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

-        vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
-        vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
+        vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+        vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

-        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
-        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
+        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

-        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
-        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
+        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

-        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
-        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
+        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }
@@ -3088,66 +3153,61 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void *

    uint32_t qh;

-    // These temp values are for masking and shift operations
-    uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-    uint32_t temp_2[16] = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80,
-                           0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000};
-
    size_t vl = __riscv_vsetvl_e8m1(qk/2);

+    // These tempory registers are for masking and shift operations
+    vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
+    vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
+
+    vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
+    vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
+
    for (int i = 0; i < nb; i++) {
        memcpy(&qh, x[i].qh, sizeof(uint32_t));

-        // temporary registers
-        vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_2, vl);
-        vuint32m4_t vt_2 = __riscv_vle32_v_u32m4(temp_1, vl);
-        vuint32m4_t vt_3 = __riscv_vsll_vx_u32m4(vt_1, 16, vl);
-        vuint32m4_t vt_4 = __riscv_vadd_vx_u32m4(vt_2, 12, vl);
-
        // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
-        vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(vt_1, qh, vl);
-        vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(xha_0, vt_2, vl);
-        vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);
+        vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
+        vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
+        vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);

        // ((qh & (1u << (j + 16))) >> (j + 12));
-        vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(vt_3, qh, vl);
-        vuint32m4_t xhl_1 = __riscv_vsrl_vv_u32m4(xha_1, vt_4, vl);
+        vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
+        vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);

        // narrowing
-        vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xhl_0, vl);
-        vuint8m1_t  xh_0  = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);
+        vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
+        vuint8mf2_t xh_0  = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);

-        vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xhl_1, vl);
-        vuint8m1_t  xh_1  = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);
+        vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
+        vuint8mf2_t xh_1  = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);

        // load
-        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
+        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

-        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
-        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
+        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

-        vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
-        vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
+        vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+        vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

-        vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
-        vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);
+        vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
+        vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);

-        vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
-        vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
+        vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+        vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

-        vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 16, vl);
-        vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 16, vl);
+        vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
+        vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);

-        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
-        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
+        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

-        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
-        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
+        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

-        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
-        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
+        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
    }
@@ -3414,62 +3474,58 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void *

    uint32_t qh;

-    // These temp values are for shift operations
-    uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-
    size_t vl = __riscv_vsetvl_e8m1(qk/2);

+    // temporary registers for shift operations
+    vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
+    vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
+
    for (int i = 0; i < nb; i++) {
        memcpy(&qh, x[i].qh, sizeof(uint32_t));

-        // temporary registers
-        vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_1, vl);
-        vuint32m4_t vt_2 = __riscv_vadd_vx_u32m4(vt_1, 12, vl);
-
        // load qh
-        vuint32m4_t vqh = __riscv_vmv_v_x_u32m4(qh, vl);
+        vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);

        // ((qh >> (j + 0)) << 4) & 0x10;
-        vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(vqh, vt_1, vl);
-        vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);
-        vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(xhl_0, 0x10, vl);
+        vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
+        vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
+        vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);

        // ((qh >> (j + 12)) ) & 0x10;
-        vuint32m4_t xhr_1 = __riscv_vsrl_vv_u32m4(vqh, vt_2, vl);
-        vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(xhr_1, 0x10, vl);
+        vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
+        vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);

        // narrowing
-        vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xha_0, vl);
-        vuint8m1_t  xh_0  = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);
+        vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
+        vuint8mf2_t xh_0  = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);

-        vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xha_1, vl);
-        vuint8m1_t  xh_1  = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);
+        vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
+        vuint8mf2_t xh_1  = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);

        // load
-        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
+        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

-        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
-        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
+        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

-        vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
-        vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
+        vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+        vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

-        vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
-        vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);
+        vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
+        vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);

-        vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
-        vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
+        vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+        vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

-        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
-        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
+        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

-        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
-        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
+        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

-        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
-        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
+        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

k_quants.c (744 changes)

@@ -54,6 +54,10 @@ inline static int32_t vaddvq_s32(int32x4_t v) {
 #endif
 #endif

+#ifdef __riscv_v_intrinsic
+#include <riscv_vector.h>
+#endif
+
 #undef MIN
 #undef MAX
 #define MIN(a, b) ((a) < (b) ? (a) : (b))

@@ -1582,6 +1586,90 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri

    *s = hsum_float_8(acc);

+#elif defined __riscv_v_intrinsic
+
+    float sumf = 0;
+    uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+
+    for (int i = 0; i < nb; ++i) {
+
+        const uint8_t * q2 = x[i].qs;
+        const  int8_t * q8 = y[i].qs;
+        const uint8_t * sc = x[i].scales;
+
+        const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d);
+        const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
+
+        size_t vl = 16;
+
+        vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
+        vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
+
+        vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
+
+        vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
+        vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
+        vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
+        vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
+        vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
+
+        sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
+
+        vl = 32;
+
+        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+        vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
+
+        uint8_t is=0;
+        int isum=0;
+
+        for (int j = 0; j < QK_K/128; ++j) {
+            // load Q2
+            vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
+
+            vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
+            vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
+            vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
+            vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
+
+            // duplicate scale elements for product
+            vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
+            vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
+            vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
+            vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
+
+            vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
+            vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
+            vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
+            vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
+
+            // load Q8
+            vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
+            vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
+            vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
+            vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
+
+            vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
+            vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
+            vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
+            vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
+
+            vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
+            vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
+
+            isum += __riscv_vmv_x_s_i32m1_i32(isum1);
+
+            q2+=32; q8+=128; is=8;
+
+        }
+
+        sumf += dall * isum;
+
+    }
+
+    *s = sumf;
+
 #else

    float sumf = 0;
@@ -1807,6 +1895,64 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri

    *s = hsum_float_8(acc) + summs;

+#elif defined __riscv_v_intrinsic
+
+    uint32_t aux32[2];
+    const uint8_t * scales = (const uint8_t *)aux32;
+
+    float sumf = 0;
+
+    for (int i = 0; i < nb; ++i) {
+
+        const float d = y[i].d * (float)x[i].d;
+        const float dmin = -y[i].d * (float)x[i].dmin;
+
+        const uint8_t * restrict q2 = x[i].qs;
+        const  int8_t * restrict q8 = y[i].qs;
+        const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+
+        aux32[0] = sc[0] & 0x0f0f0f0f;
+        aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
+
+        sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
+
+        int isum1 = 0;
+        int isum2 = 0;
+
+        size_t vl = 16;
+
+        vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
+
+        // load Q2
+        vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
+
+        vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
+        vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
+        vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
+        vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
+
+        // load Q8, and take product with Q2
+        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+        vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
+        vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
+        vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
+        vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
+
+        isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
+        isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
+        isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
+        isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
+
+        sumf += d * (isum1 + isum2);
+
+    }
+
+    *s = sumf;
+
 #else

    float sumf = 0;
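
Both RISC-V paths above accumulate the same per-super-block quantity as the scalar Q2_K reference. As a sketch (with $\mathrm{sc}_j$ the 4-bit sub-scales, $m_j$ the 4-bit sub-mins, and $b_j$ the Q8 block sums `bsums`):

$$
\mathrm{sumf} \mathrel{+}= \underbrace{(-\,y_d\, x_{d\min})}_{\texttt{dmin}} \sum_j m_j\, b_j \;+\; \underbrace{(y_d\, x_d)}_{\texttt{dall}} \sum_j \mathrm{sc}_j \left(q^{(2)}_j \cdot q^{(8)}_j\right)
$$

where $q^{(2)}_j \cdot q^{(8)}_j$ is the integer dot product of the $j$-th group of 2-bit and 8-bit quants.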

@@ -2220,6 +2366,106 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri

    *s = hsum_float_8(acc);

+#elif defined __riscv_v_intrinsic
+
+    uint32_t aux[3];
+    uint32_t utmp[4];
+
+    float sumf = 0;
+    for (int i = 0; i < nb; ++i) {
+
+        const uint8_t * restrict q3 = x[i].qs;
+        const uint8_t * restrict qh = x[i].hmask;
+        const  int8_t * restrict q8 = y[i].qs;
+
+        memcpy(aux, x[i].scales, 12);
+        utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+        utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+        utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+        utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+        int8_t * scale = (int8_t *)utmp;
+        for (int j = 0; j < 16; ++j) scale[j] -= 32;
+
+
+        size_t vl = 32;
+        uint8_t m = 1;
+
+        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+        vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
+
+        int sum_t = 0;
+
+        for (int j = 0; j < QK_K; j += 128) {
+
+            vl = 32;
+
+            // load Q3
+            vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
+
+            vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
+            vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
+            vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
+            vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
+
+            // compute mask for subtraction
+            vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
+            vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
+            vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
+            m <<= 1;
+
+            vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
+            vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
+            vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
+            m <<= 1;
+
+            vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
+            vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
+            vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
+            m <<= 1;
+
+            vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
+            vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
+            vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
+            m <<= 1;
+
+            // load Q8 and take product with Q3
+            vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
+            vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
+            vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
+            vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
+
+            vl = 16;
+
+            // retreive lane to multiply with scale
+            vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
+            vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
+            vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
+            vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
+            vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
+            vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
+            vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
+            vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
+
+            vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
+            vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
+            vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
+            vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
+
+            sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
+
+            q3 += 32;    q8 += 128;   scale += 8;
+
+        }
+
+        const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
+
+        sumf += d*sum_t;
+
+    }
+
+    *s = sumf;
+
 #else
    // scalar version
    // This function is written like this so the compiler can manage to vectorize most of it
@@ -2523,6 +2769,79 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri

    *s = hsum_float_8(acc);

+#elif defined __riscv_v_intrinsic
+
+    uint16_t aux16[2];
+    int8_t * scales = (int8_t *)aux16;
+
+    float sumf = 0;
+
+    for (int i = 0; i < nb; ++i) {
+
+        const uint8_t * restrict q3 = x[i].qs;
+        const  int8_t * restrict q8 = y[i].qs;
+
+        const uint16_t a = *(const uint16_t *)x[i].scales;
+        aux16[0] = a & 0x0f0f;
+        aux16[1] = (a >> 4) & 0x0f0f;
+
+        for (int j = 0; j < 4; ++j) scales[j] -= 8;
+
+        int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
+
+        const float d = y[i].d * (float)x[i].d;
+
+        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+        // load qh
+        vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
+        vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
+
+        size_t vl = 16;
+
+        // extend and combine both qh_x1 and qh_x2
+        vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
+
+        vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
+        vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
+        vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
+        vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
+
+        // load Q3
+        vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
+
+        vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
+        vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
+        vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
+        vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
+
+        vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
+        vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
+        vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
+        vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
+
+        // load Q8 and take product with Q3
+        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+        vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
+        vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
+        vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
+        vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
+
+        isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
+        isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
+        isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
+        isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
+
+        sumf += d * isum;
+
+    }
+
+    *s = sumf;
+
 #else

    int8_t aux8[QK_K];
@ -2823,6 +3142,78 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);

#elif defined __riscv_v_intrinsic

const uint8_t * scales = (const uint8_t*)&utmp[0];
const uint8_t * mins = (const uint8_t*)&utmp[2];

float sumf = 0;

for (int i = 0; i < nb; ++i) {

size_t vl = 8;

const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin);

vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);

memcpy(utmp, x[i].scales, 12);
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
const uint32_t uaux = utmp[1] & kmask1;
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
utmp[2] = uaux;
utmp[0] &= kmask1;

vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);

vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);

const uint8_t * restrict q4 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;

vl = 32;

int32_t sum_1 = 0;
int32_t sum_2 = 0;

vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);

for (int j = 0; j < QK_K/64; ++j) {
// load Q4
vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);

// load Q8 and multiply it with lower Q4 nibble
vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);

sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];

// load Q8 and multiply it with upper Q4 nibble
vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);

sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];

q4 += 32; q8 += 64;

}

sumf += d*(sum_1 + sum_2);

}

*s = sumf;

#else
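For readers not fluent in the RVV intrinsics, the loop above computes the same quantity as the scalar sketch below for one 64-value Q4_K chunk (the function name and simplified argument list are illustrative, not part of the diff): lower nibbles are paired with the first 32 Q8 values, upper nibbles with the next 32, and each 32-value sub-block carries its own integer scale.

#include <stdint.h>

// Scalar sketch (illustrative) of one 64-value chunk of the Q4_K dot product.
// q4: 32 packed bytes, q8: 64 int8 activations, scales: per-sub-block scales, j: chunk index.
static int32_t q4k_chunk_dot(const uint8_t * q4, const int8_t * q8, const uint8_t * scales, int j) {
    int32_t sum_1 = 0;
    int32_t sum_2 = 0;
    for (int k = 0; k < 32; ++k) {
        sum_1 += (q4[k] & 0x0F) * q8[k];       // lower nibble vs q8[0..31]
        sum_2 += (q4[k] >>   4) * q8[k + 32];  // upper nibble vs q8[32..63]
    }
    return sum_1 * scales[2*j+0] + sum_2 * scales[2*j+1];
}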
@ -3064,6 +3455,50 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc) - summs;

#elif defined __riscv_v_intrinsic

uint16_t s16[2];
const uint8_t * restrict scales = (const uint8_t *)s16;

float sumf = 0;

for (int i = 0; i < nb; ++i) {

const uint8_t * restrict q4 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;

const uint16_t * restrict b = (const uint16_t *)x[i].scales;
s16[0] = b[0] & 0x0f0f;
s16[1] = (b[0] >> 4) & 0x0f0f;

sumf -= y[i].d * ggml_fp16_to_fp32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
const float d = y[i].d * ggml_fp16_to_fp32(x[i].d[0]);

size_t vl = 32;

vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);

// load Q4
vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);

// load Q8 and multiply it with lower Q4 nibble
vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);

sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);

// load Q8 and multiply it with upper Q4 nibble
vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);

sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);

}

*s = sumf;

#else

uint8_t aux8[QK_K];
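The QK_K == 64 variant above packs its four 4-bit sub-block scales into a single 16-bit word; a minimal self-contained sketch of that unpacking follows, assuming a little-endian target (the packed value 0x4321 is just an example, not data from the diff).

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint16_t b0 = 0x4321;      // example packed scales word
    uint16_t s16[2];
    s16[0] = b0 & 0x0f0f;            // keeps nibbles 0 and 2
    s16[1] = (b0 >> 4) & 0x0f0f;     // keeps nibbles 1 and 3
    const uint8_t * scales = (const uint8_t *)s16;
    // on little-endian: scales[0]=1, scales[1]=3, scales[2]=2, scales[3]=4
    printf("%d %d %d %d\n", scales[0], scales[1], scales[2], scales[3]);
    return 0;
}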
@ -3394,6 +3829,93 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc) + summs;

#elif defined __riscv_v_intrinsic

const uint8_t * scales = (const uint8_t*)&utmp[0];
const uint8_t * mins = (const uint8_t*)&utmp[2];

float sumf = 0;
float sums = 0.0;

size_t vl;

for (int i = 0; i < nb; ++i) {

vl = 8;

const uint8_t * restrict q5 = x[i].qs;
const uint8_t * restrict hm = x[i].qh;
const int8_t * restrict q8 = y[i].qs;

const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
const float dmin = ggml_fp16_to_fp32(x[i].dmin) * y[i].d;

vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);

memcpy(utmp, x[i].scales, 12);
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
const uint32_t uaux = utmp[1] & kmask1;
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
utmp[2] = uaux;
utmp[0] &= kmask1;

vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);

vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);

vl = 32;
int32_t aux32 = 0;
int is = 0;

uint8_t m = 1;
vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);

for (int j = 0; j < QK_K/64; ++j) {
// load Q5 and Q8
vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);

// compute mask for addition
vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
m <<= 1;

vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
m <<= 1;

vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);

vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);

vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);

aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
q5 += 32; q8 += 64;

}

vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
sums += __riscv_vfmv_f_s_f32m1_f32(vaux);

}

*s = sumf+sums;

#else

const uint8_t * scales = (const uint8_t*)&utmp[0];
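The masked adds above implement the usual Q5_K decode: each 4-bit value gains +16 when its bit in the qh bitfield is set. A scalar sketch of the per-element decode, with illustrative names only:

#include <stdint.h>

// Scalar equivalent of the vmsne + masked vadd sequence above.
static inline int32_t q5k_value(uint8_t nibble, uint8_t qh_byte, uint8_t m) {
    return (int32_t)nibble + ((qh_byte & m) ? 16 : 0);
}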
@ -3639,6 +4161,76 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

float sumf = 0;

for (int i = 0; i < nb; ++i) {

const float d = y[i].d * (float)x[i].d;
const int8_t * sc = x[i].scales;

const uint8_t * restrict q5 = x[i].qs;
const uint8_t * restrict qh = x[i].qh;
const int8_t * restrict q8 = y[i].qs;

vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

// load qh
vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));

size_t vl = 16;

// combine both qh_1 and qh_2
vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);

vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);

vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);

// load q5
vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);

vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));

vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);

// load Q8 and multiply it with Q5
vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);

int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);

sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);

}

*s = sumf;

#else

int8_t aux8[QK_K];
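In this QK_K == 64 path the high bit is folded in the opposite direction: the vnot/vand/vsub sequence subtracts 16 whenever the qh bit is clear, so a quant decodes into the range [-16, 15] before scaling. A scalar sketch of that decode (illustrative names, derived from the code above):

#include <stdint.h>

// Scalar equivalent of q5_x - ((~qh_shifted) & 16) as computed above.
static inline int32_t q5k_64_value(uint8_t nibble, int qh_bit_set) {
    return (int32_t)nibble - (qh_bit_set ? 0 : 16);
}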
@ -4023,6 +4615,91 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

float sumf = 0;
for (int i = 0; i < nb; ++i) {

const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;

const uint8_t * restrict q6 = x[i].ql;
const uint8_t * restrict qh = x[i].qh;
const int8_t * restrict q8 = y[i].qs;

const int8_t * restrict scale = x[i].scales;

size_t vl;

vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

int sum_t = 0;
int is = 0;

for (int j = 0; j < QK_K/128; ++j) {

vl = 32;

// load qh
vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);

// load Q6
vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);

vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);

vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);

vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);

vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);

// load Q8 and take product
vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);

vl = 16;

vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);

vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);

sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);

q6 += 64; qh += 32; q8 += 128; is=8;

}

sumf += d * sum_t;

}

*s = sumf;

#else

int8_t aux8[QK_K];
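The OR/subtract sequence above is the standard Q6_K reconstruction: four low bits come from ql, two high bits from qh, then the value is recentred by 32. A scalar sketch of the per-element decode (illustrative; the low-nibble case is shown, the high-nibble case shifts ql right by 4 first):

#include <stdint.h>

// Scalar equivalent of ((ql & 0x0F) | (qh_2bits << 4)) - 32 as done by qhi_*/a_* above.
static inline int32_t q6k_value(uint8_t ql_byte, uint8_t qh_2bits) {
    return (int32_t)((ql_byte & 0x0F) | (uint8_t)((qh_2bits & 0x03) << 4)) - 32;
}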
@ -4276,6 +4953,73 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

float sumf = 0;

for (int i = 0; i < nb; ++i) {

const float d_all = (float)x[i].d;

const uint8_t * restrict q6 = x[i].ql;
const uint8_t * restrict qh = x[i].qh;
const int8_t * restrict q8 = y[i].qs;

const int8_t * restrict scale = x[i].scales;

int32_t isum = 0;

size_t vl = 16;

vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

// load Q6
vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);

// load qh
vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);

vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);

vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);

vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);

// load Q8 and take product
vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);

isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];

sumf += isum * d_all * y[i].d;

}

*s = sumf;

#else

int8_t aux8[QK_K];
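All of the RVV kernels added above share one shape: widen-multiply int8 quants with int8 activations, widen-reduce to a scalar, weight by the per-sub-block integer scale, then accumulate into the float result with the per-block d (plus a dmin/bsums correction where the format has one). A self-contained scalar stand-in for that pattern, with illustrative names only:

#include <stdint.h>

// Generic scalar stand-in for the per-block accumulation pattern used above.
static float block_dot(const int8_t * q, const int8_t * q8, const int8_t * scale,
                       int n_sub, int sub_len, float d) {
    int32_t acc = 0;
    for (int k = 0; k < n_sub; ++k) {
        int32_t partial = 0;                                          // widen-reduce target
        for (int t = 0; t < sub_len; ++t) {
            partial += (int32_t)q[k*sub_len + t] * q8[k*sub_len + t]; // widen-multiply
        }
        acc += partial * scale[k];                                    // per-sub-block scale
    }
    return d * (float)acc;                                            // per-block float scale
}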
102
llama.cpp
@ -1356,6 +1356,9 @@ static void llama_kv_cache_seq_rm(
llama_seq_id seq_id,
llama_pos p0,
llama_pos p1) {
if (p0 < 0) p0 = 0;
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

for (uint32_t i = 0; i < cache.size; ++i) {
if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
cache.cells[i].seq_id.erase(seq_id);
@ -1372,6 +1375,9 @@ static void llama_kv_cache_seq_cp(
llama_seq_id seq_id_dst,
llama_pos p0,
llama_pos p1) {
if (p0 < 0) p0 = 0;
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

for (uint32_t i = 0; i < cache.size; ++i) {
if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
cache.cells[i].seq_id.insert(seq_id_dst);
@ -1394,6 +1400,9 @@ static void llama_kv_cache_seq_shift(
llama_pos p0,
llama_pos p1,
llama_pos delta) {
if (p0 < 0) p0 = 0;
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

for (uint32_t i = 0; i < cache.size; ++i) {
if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
cache.cells[i].pos += delta;
@ -7038,6 +7047,10 @@ int llama_n_embd(const struct llama_model * model) {
return model->hparams.n_embd;
}

float llama_rope_freq_scale_train(const struct llama_model * model) {
return model->hparams.rope_freq_scale_train;
}

int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
return snprintf(buf, buf_size, "%s %s %s",
llama_model_arch_name(model->arch).c_str(),
@ -7205,16 +7218,6 @@ struct llama_data_file_context : llama_data_context {
*
*/
static void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
// TODO: does not support multi-sequence states
{
const auto & kv_self = ctx->kv_self;
for (uint32_t i = 0; i < kv_self.head; ++i) {
GGML_ASSERT(kv_self.cells[i].pos == (int32_t) i);
GGML_ASSERT(kv_self.cells[i].seq_id.size() == 1);
GGML_ASSERT(kv_self.cells[i].has_seq_id(0));
}
}

// copy rng
{
std::stringstream rng_ss;
@ -7267,36 +7270,38 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat
const auto & hparams = ctx->model.hparams;
const auto & cparams = ctx->cparams;

const int n_layer = hparams.n_layer;
const auto n_layer = hparams.n_layer;
const int n_embd = hparams.n_embd_gqa();
const auto n_embd = hparams.n_embd_gqa();
const int n_ctx = cparams.n_ctx;
const auto n_ctx = cparams.n_ctx;

const size_t kv_size = kv_self.buf.size;
const size_t kv_buf_size = kv_self.buf.size;
const int kv_ntok = kv_self.head;
const uint32_t kv_head = kv_self.head;
const uint32_t kv_size = kv_self.size;

data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
data_ctx->write(&kv_head, sizeof(kv_head));
data_ctx->write(&kv_size, sizeof(kv_size));
data_ctx->write(&kv_ntok, sizeof(kv_ntok));

if (kv_size) {
if (kv_buf_size) {
const size_t elt_size = ggml_element_size(kv_self.k);

ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
ggml_cgraph gf{};

ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
kout3d->data = kout3d_data.data();

ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
vout3d->data = vout3d_data.data();

ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
n_embd, kv_ntok, n_layer,
n_embd, kv_head, n_layer,
elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
kv_ntok, n_embd, n_layer,
kv_head, n_embd, n_layer,
elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
@ -7310,6 +7315,20 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat
data_ctx->write(kout3d_data.data(), kout3d_data.size());
data_ctx->write(vout3d_data.data(), vout3d_data.size());
}

for (uint32_t i = 0; i < kv_size; ++i) {
const auto & cell = kv_self.cells[i];

const llama_pos pos = cell.pos;
const size_t seq_id_size = cell.seq_id.size();

data_ctx->write(&pos, sizeof(pos));
data_ctx->write(&seq_id_size, sizeof(seq_id_size));

for (auto seq_id : cell.seq_id) {
data_ctx->write(&seq_id, sizeof(seq_id));
}
}
}
}
@ -7381,34 +7400,36 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
const int n_embd = hparams.n_embd_gqa();
const int n_ctx = cparams.n_ctx;

size_t kv_size;
size_t kv_buf_size;
int kv_ntok;
uint32_t kv_head;
uint32_t kv_size;

memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
memcpy(&kv_head, inp, sizeof(kv_head)); inp += sizeof(kv_head);
memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok);

if (kv_size) {
if (kv_buf_size) {
GGML_ASSERT(kv_self.buf.size == kv_size);
GGML_ASSERT(kv_self.buf.size == kv_buf_size);

const size_t elt_size = ggml_element_size(kv_self.k);

ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
ggml_cgraph gf{};

ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
kin3d->data = (void *) inp;
inp += ggml_nbytes(kin3d);

ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
vin3d->data = (void *) inp;
inp += ggml_nbytes(vin3d);

ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
n_embd, kv_ntok, n_layer,
n_embd, kv_head, n_layer,
elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
kv_ntok, n_embd, n_layer,
kv_head, n_embd, n_layer,
elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
@ -7418,8 +7439,27 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
ggml_free(cpy_ctx);
}

ctx->kv_self.head = kv_ntok;
ctx->kv_self.head = kv_head;
ctx->kv_self.size = kv_size;

ctx->kv_self.cells.resize(kv_size);

for (uint32_t i = 0; i < kv_size; ++i) {
llama_pos pos;
size_t seq_id_size;

memcpy(&pos, inp, sizeof(pos)); inp += sizeof(pos);
memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);

ctx->kv_self.cells[i].pos = pos;

llama_seq_id seq_id;

for (size_t j = 0; j < seq_id_size; ++j) {
memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
ctx->kv_self.cells[i].seq_id.insert(seq_id);
}
}
}

const size_t nread = inp - src;
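The llama.cpp changes above extend the serialized context state: after kv_buf_size, kv_head and kv_size, the writer now appends each cell's position and its set of sequence ids, and the reader restores them, which is what lifts the old single-sequence restriction. A hedged usage sketch of round-tripping that state through the existing public API (ctx, ctx2 and buf are illustrative):

#include "llama.h"
#include <vector>

// Save the full context state (now including per-cell sequence metadata) and
// restore it into another context created with the same model and parameters.
static void roundtrip_state(llama_context * ctx, llama_context * ctx2) {
    std::vector<uint8_t> buf(llama_get_state_size(ctx));
    llama_copy_state_data(ctx, buf.data());   // writes kv_buf_size, kv_head, kv_size, K/V data, cell metadata
    llama_set_state_data(ctx2, buf.data());   // reads them back and rebuilds kv_self.cells
}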
13
llama.h
@ -42,7 +42,7 @@
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'

#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
#define LLAMA_SESSION_VERSION 1
#define LLAMA_SESSION_VERSION 2

#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
@ -282,6 +282,9 @@ extern "C" {
LLAMA_API int llama_n_ctx_train(const struct llama_model * model);
LLAMA_API int llama_n_embd (const struct llama_model * model);

// Get the model's RoPE frequency scaling factor
LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);

// Get a string describing the model type
LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
@ -330,12 +333,16 @@ extern "C" {
"avoid using this, it will be removed in the future, instead - count the tokens in user code");

// Remove all tokens data of cells in [c0, c1)
// c0 < 0 : [0, c1]
// c1 < 0 : [c0, inf)
LLAMA_API void llama_kv_cache_tokens_rm(
struct llama_context * ctx,
int32_t c0,
int32_t c1);

// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
// p0 < 0 : [0, p1]
// p1 < 0 : [p0, inf)
LLAMA_API void llama_kv_cache_seq_rm(
struct llama_context * ctx,
llama_seq_id seq_id,
@ -344,6 +351,8 @@ extern "C" {
// Copy all tokens that belong to the specified sequence to another sequence
// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
// p0 < 0 : [0, p1]
// p1 < 0 : [p0, inf)
LLAMA_API void llama_kv_cache_seq_cp(
struct llama_context * ctx,
llama_seq_id seq_id_src,
@ -358,6 +367,8 @@ extern "C" {
// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
// If the KV cache is RoPEd, the KV data is updated accordingly
// p0 < 0 : [0, p1]
// p1 < 0 : [p0, inf)
LLAMA_API void llama_kv_cache_seq_shift(
struct llama_context * ctx,
llama_seq_id seq_id,
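A hedged usage sketch of the sequence-level cache calls declared above, using the newly documented convention that a negative p0 means "from position 0" and a negative p1 means "to the end" (ctx, the sequence ids and the position variables are illustrative):

#include "llama.h"

static void prune_and_reuse(llama_context * ctx, llama_pos n_keep, llama_pos n_past, llama_pos delta) {
    llama_kv_cache_seq_rm   (ctx, 3, -1, -1);              // drop all of sequence 3
    llama_kv_cache_seq_cp   (ctx, 0, 1, 0, n_past);        // share seq 0's first n_past positions with seq 1
    llama_kv_cache_seq_shift(ctx, 1, n_keep, -1, -delta);  // slide the tail of seq 1 back by delta
}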