Compare commits: master ... ceb/nomic- (105 commits)
Commits (SHA1):
1453215165 610394fff8 7addf2b878 16bc3c3be8 0f1a958a51 a97935e098 696faa8660
02b9bafe29 de9b0bbbe4 50579f27e9 8a99f69895 d5670d6e46 1eb8804c18 3773e1afe7
ae6d6824b7 904c563dbc 3959283eed 8b65f4c5e5 44b1a97a15 8072706210 2d2c76acc4
f58f581ca8 c8fd4ba846 f7cb0a65ef 9af7f58b7b b906e126ca 747e1eafcf 27631dbb6e
3e09e127eb 56430c3209 9ae88baf38 a4bb9c5ced 23f6d51f68 208cd52f7d 1829f1d7be
02c3309f6d 9c4dfd06e8 fe26e6adff 6474fc879a 2a41ba7258 a934b2cb8a f194e1b6a6
39abedd1d7 84f7fc4553 71565eb0c3 af00cca08e c438c16896 a8cac53207 f88b198885
ffd0624be2 a5eb001eab e006d377dd 89b71278ff 1c17010188 74ddf0f17d 8d9efbf97a
752f7ebd61 8400015337 cbc0d1af79 21841d3163 cc05a602d6 c1fd64548d 9bc52ebae3
8dc79ac380 cd0257ed0d 4809890d80 b78a94bc6d d5741c07a5 3327d84a7f 46385ee0d5
f0cd38b9ad 09d83f0401 8564f79036 020b1745a0 ff4212d20f 9db90cbe12 3d850db767
24a4a5956a bc4b5ed1cb de589ced7c 6ac39752bf 32289aa447 06d4b21598 f1c9bc1821
4b223ec432 5509f74318 601905e75e 93306f16d0 77135a3bf5 9e4f8b4acc 6b6c73a9e3
1b1416d7b7 2c24d67e7b addac25293 68aca6be08 4ed25b2f88 bd5f6399bb 8bea719879
68cf1df6fb beee57266f b7e2e691d4 45c8778b49 8563fa001f 48a45ea435 ba15dfd0be
57 changed files with 5898 additions and 2093 deletions
.gitmodules (vendored, new file, +3)

@@ -0,0 +1,3 @@
+[submodule "kompute"]
+    path = kompute
+    url = https://github.com/nomic-ai/kompute.git
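Note that the Kompute sources come in as a git submodule; the CMake changes below only enable the backend when kompute/CMakeLists.txt exists in the checkout, so the submodule has to be initialized first (for example with git submodule update --init kompute).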
CMakeLists.txt (167 changes)

@@ -96,6 +96,7 @@ option(LLAMA_CLBLAST "llama: use CLBlast"
 option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT})
 option(LLAMA_METAL_NDEBUG "llama: disable Metal debugging" OFF)
 option(LLAMA_METAL_SHADER_DEBUG "llama: compile Metal with -fno-fast-math" OFF)
+option(LLAMA_KOMPUTE "llama: use Kompute" OFF)
 option(LLAMA_MPI "llama: use MPI" OFF)
 option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF)

@@ -442,6 +443,161 @@ if (LLAMA_HIPBLAS)
     endif()
 endif()

+if (LLAMA_KOMPUTE)
+    add_compile_definitions(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1)
+    find_package(Vulkan COMPONENTS glslc REQUIRED)
+    find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc)
+    if (NOT glslc_executable)
+        message(FATAL_ERROR "glslc not found")
+    endif()
+
+    function(compile_shader)
+        set(options)
+        set(oneValueArgs)
+        set(multiValueArgs SOURCES)
+        cmake_parse_arguments(compile_shader "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+        foreach(source ${compile_shader_SOURCES})
+            get_filename_component(filename ${source} NAME)
+            set(spv_file ${filename}.spv)
+            add_custom_command(
+                OUTPUT ${spv_file}
+                DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${source}
+                    ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/common.comp
+                    ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_getrows.comp
+                    ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_mul_mv_q_n.comp
+                COMMAND ${glslc_executable} --target-env=vulkan1.2 -o ${spv_file} ${CMAKE_CURRENT_SOURCE_DIR}/${source}
+                COMMENT "Compiling ${source} to ${spv_file}"
+            )
+
+            get_filename_component(RAW_FILE_NAME ${spv_file} NAME)
+            set(FILE_NAME "shader${RAW_FILE_NAME}")
+            string(REPLACE ".comp.spv" ".h" HEADER_FILE ${FILE_NAME})
+            string(TOUPPER ${HEADER_FILE} HEADER_FILE_DEFINE)
+            string(REPLACE "." "_" HEADER_FILE_DEFINE "${HEADER_FILE_DEFINE}")
+            set(OUTPUT_HEADER_FILE "${HEADER_FILE}")
+            message(STATUS "${HEADER_FILE} generating ${HEADER_FILE_DEFINE}")
+            if(CMAKE_GENERATOR MATCHES "Visual Studio")
+                add_custom_command(
+                    OUTPUT ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                    DEPENDS ${spv_file} xxd
+                    COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/xxd"
+                )
+            else()
+                add_custom_command(
+                    OUTPUT ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_BINARY_DIR}/bin/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE}
+                    COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                    DEPENDS ${spv_file} xxd
+                    COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/xxd"
+                )
+            endif()
+        endforeach()
+    endfunction()
+
+    if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/kompute/CMakeLists.txt")
+        message(STATUS "Kompute found")
+        set(KOMPUTE_OPT_LOG_LEVEL Error CACHE STRING "Kompute log level")
+        add_subdirectory(kompute)
+
+        # Compile our shaders
+        compile_shader(SOURCES
+            kompute-shaders/op_scale.comp
+            kompute-shaders/op_scale_8.comp
+            kompute-shaders/op_add.comp
+            kompute-shaders/op_addrow.comp
+            kompute-shaders/op_mul.comp
+            kompute-shaders/op_mulrow.comp
+            kompute-shaders/op_silu.comp
+            kompute-shaders/op_relu.comp
+            kompute-shaders/op_gelu.comp
+            kompute-shaders/op_softmax.comp
+            kompute-shaders/op_norm.comp
+            kompute-shaders/op_rmsnorm.comp
+            kompute-shaders/op_diagmask.comp
+            kompute-shaders/op_mul_mat_mat_f32.comp
+            kompute-shaders/op_mul_mat_f16.comp
+            kompute-shaders/op_mul_mat_q8_0.comp
+            kompute-shaders/op_mul_mat_q4_0.comp
+            kompute-shaders/op_mul_mat_q4_1.comp
+            kompute-shaders/op_mul_mat_q6_k.comp
+            kompute-shaders/op_getrows_f16.comp
+            kompute-shaders/op_getrows_q4_0.comp
+            kompute-shaders/op_getrows_q4_1.comp
+            kompute-shaders/op_getrows_q6_k.comp
+            kompute-shaders/op_rope_f16.comp
+            kompute-shaders/op_rope_f32.comp
+            kompute-shaders/op_cpy_f16_f16.comp
+            kompute-shaders/op_cpy_f16_f32.comp
+            kompute-shaders/op_cpy_f32_f16.comp
+            kompute-shaders/op_cpy_f32_f32.comp
+        )
+
+        # Create a custom target for our generated shaders
+        add_custom_target(generated_shaders DEPENDS
+            shaderop_scale.h
+            shaderop_scale_8.h
+            shaderop_add.h
+            shaderop_addrow.h
+            shaderop_mul.h
+            shaderop_mulrow.h
+            shaderop_silu.h
+            shaderop_relu.h
+            shaderop_gelu.h
+            shaderop_softmax.h
+            shaderop_norm.h
+            shaderop_rmsnorm.h
+            shaderop_diagmask.h
+            shaderop_mul_mat_mat_f32.h
+            shaderop_mul_mat_f16.h
+            shaderop_mul_mat_q8_0.h
+            shaderop_mul_mat_q4_0.h
+            shaderop_mul_mat_q4_1.h
+            shaderop_mul_mat_q6_k.h
+            shaderop_getrows_f16.h
+            shaderop_getrows_q4_0.h
+            shaderop_getrows_q4_1.h
+            shaderop_getrows_q6_k.h
+            shaderop_rope_f16.h
+            shaderop_rope_f32.h
+            shaderop_cpy_f16_f16.h
+            shaderop_cpy_f16_f32.h
+            shaderop_cpy_f32_f16.h
+            shaderop_cpy_f32_f32.h
+        )
+
+        # Create a custom command that depends on the generated_shaders
+        add_custom_command(
+            OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp
+            COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp
+            DEPENDS generated_shaders
+            COMMENT "Ensuring shaders are generated before compiling ggml-kompute.cpp"
+        )
+
+        # Add the stamp to the main sources to ensure dependency tracking
+        set(GGML_SOURCES_KOMPUTE ggml-kompute.cpp ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp)
+        set(GGML_HEADERS_KOMPUTE ggml-kompute.h ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp)
+        add_compile_definitions(GGML_USE_KOMPUTE)
+        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} kompute)
+        set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${CMAKE_BINARY_DIR})
+    else()
+        message(WARNING "Kompute not found")
+    endif()
+endif()
+
 function(get_flags CCID CCVER)
     set(C_FLAGS "")
     set(CXX_FLAGS "")

@@ -758,11 +914,12 @@ add_library(ggml OBJECT
     ggml-backend.h
     ggml-quants.c
     ggml-quants.h
     ${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA}
     ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
     ${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL}
     ${GGML_SOURCES_MPI} ${GGML_HEADERS_MPI}
     ${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA}
+    ${GGML_SOURCES_KOMPUTE} ${GGML_HEADERS_KOMPUTE}
 )

 target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
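The compile_shader() helper above turns each .comp file into SPIR-V with glslc and then wraps the bytes in a generated shaderop_*.h header (via the bundled xxd, inside namespace kp::shader_data), so the backend can embed its shaders at compile time. A minimal, hedged sketch of consuming one of these headers, assuming the bundled xxd follows the usual xxd -i naming (op_scale_comp_spv / op_scale_comp_spv_len for op_scale.comp.spv):

#include <cstdint>
#include <cstring>
#include <vector>

#include "shaderop_scale.h" // generated at build time by compile_shader()

// Copy the embedded SPIR-V blob into 32-bit words, as Vulkan/Kompute pipelines expect.
static std::vector<uint32_t> spirv_op_scale() {
    const unsigned char * data = kp::shader_data::op_scale_comp_spv;     // assumed xxd -i name
    const unsigned int    len  = kp::shader_data::op_scale_comp_spv_len; // assumed xxd -i name
    std::vector<uint32_t> words(len / sizeof(uint32_t));
    std::memcpy(words.data(), data, words.size() * sizeof(uint32_t));
    return words;
}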
@@ -543,9 +543,8 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
             invalid_param = true;
             break;
         }
-#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
         params.n_gpu_layers = std::stoi(argv[i]);
-#else
+#ifndef LLAMA_SUPPORTS_GPU_OFFLOAD
         fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
         fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
 #endif

@@ -554,9 +553,8 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
             invalid_param = true;
             break;
         }
-#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
         params.n_gpu_layers_draft = std::stoi(argv[i]);
-#else
+#ifndef LLAMA_SUPPORTS_GPU_OFFLOAD
         fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers-draft option will be ignored\n");
         fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
 #endif

@@ -565,25 +563,44 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
             invalid_param = true;
             break;
         }
-#ifdef GGML_USE_CUBLAS
         params.main_gpu = std::stoi(argv[i]);
-#else
-        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n");
-#endif
+#ifndef GGML_USE_CUBLAS
+        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the main GPU has no effect.\n");
+#endif // GGML_USE_CUBLAS
+    } else if (arg == "--split-mode" || arg == "-sm") {
+        if (++i >= argc) {
+            invalid_param = true;
+            break;
+        }
+        std::string arg_next = argv[i];
+        if (arg_next == "none") {
+            params.split_mode = LLAMA_SPLIT_NONE;
+        } else if (arg_next == "layer") {
+            params.split_mode = LLAMA_SPLIT_LAYER;
+        } else if (arg_next == "row") {
+            params.split_mode = LLAMA_SPLIT_ROW;
+        } else {
+            invalid_param = true;
+            break;
+        }
+#ifndef GGML_USE_CUBLAS
+        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
+#endif // GGML_USE_CUBLAS
     } else if (arg == "--tensor-split" || arg == "-ts") {
         if (++i >= argc) {
             invalid_param = true;
             break;
         }
-#ifdef GGML_USE_CUBLAS
         std::string arg_next = argv[i];

         // split string by , and /
         const std::regex regex{R"([,/]+)"};
         std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
         std::vector<std::string> split_arg{it, {}};
-        GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);
+        if (split_arg.size() >= LLAMA_MAX_DEVICES) {
+            invalid_param = true;
+            break;
+        }
         for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
             if (i < split_arg.size()) {
                 params.tensor_split[i] = std::stof(split_arg[i]);

@@ -591,14 +608,8 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
                 params.tensor_split[i] = 0.0f;
             }
         }
-#else
-        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
-#endif // GGML_USE_CUBLAS
-    } else if (arg == "--no-mul-mat-q" || arg == "-nommq") {
-#ifdef GGML_USE_CUBLAS
-        params.mul_mat_q = false;
-#else
-        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n");
+#ifndef GGML_USE_CUBLAS
+        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting a tensor split has no effect.\n");
 #endif // GGML_USE_CUBLAS
     } else if (arg == "--no-mmap") {
         params.use_mmap = false;

@@ -909,14 +920,15 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("                        number of layers to store in VRAM\n");
     printf("  -ngld N, --n-gpu-layers-draft N\n");
     printf("                        number of layers to store in VRAM for the draft model\n");
+    printf("  -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
+    printf("                        how to split the model across multiple GPUs, one of:\n");
+    printf("                          - none: use one GPU only\n");
+    printf("                          - layer (default): split layers and KV across GPUs\n");
+    printf("                          - row: split rows across GPUs\n");
     printf("  -ts SPLIT --tensor-split SPLIT\n");
-    printf("                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
-    printf("  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n");
-#ifdef GGML_USE_CUBLAS
-    printf("  -nommq, --no-mul-mat-q\n");
-    printf("                        use " GGML_CUBLAS_NAME " instead of custom mul_mat_q " GGML_CUDA_NAME " kernels.\n");
-    printf("                        Not recommended since this is both slower and uses more VRAM.\n");
-#endif // GGML_USE_CUBLAS
+    printf("                        fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
+    printf("  -mg i, --main-gpu i   the GPU to use for the model (with split-mode = none),\n");
+    printf("                        or for intermediate results and KV (with split-mode = row) (default: %d)\n", params.main_gpu);
 #endif
     printf("  -gan N, --grp-attn-n N\n");
     printf("                        group-attention factor (default: %d)\n", params.grp_attn_n);

@@ -1033,6 +1045,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
         mparams.n_gpu_layers = params.n_gpu_layers;
     }
     mparams.main_gpu        = params.main_gpu;
+    mparams.split_mode      = params.split_mode;
     mparams.tensor_split    = params.tensor_split;
     mparams.use_mmap        = params.use_mmap;
     mparams.use_mlock       = params.use_mlock;
@@ -59,6 +59,7 @@ struct gpt_params {
     float   p_split            = 0.1f;  // speculative decoding split probability
     int32_t n_gpu_layers       = -1;    // number of layers to store in VRAM (-1 - use default)
     int32_t n_gpu_layers_draft = -1;    // number of layers to store in VRAM for the draft model (-1 - use default)
+    llama_split_mode split_mode = LLAMA_SPLIT_LAYER; // how to split the model across GPUs
     int32_t main_gpu           = 0;     // the GPU that is used for scratch and small tensors
     float   tensor_split[LLAMA_MAX_DEVICES] = {0};  // how split tensors should be distributed across GPUs
     int32_t n_beams            = 0;     // if non-zero then use beam search of given width.
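Taken together, the new split_mode field and the -sm/--split-mode flag select between none, layer (the default) and row splitting, -ts/--tensor-split still gives per-GPU proportions, and -mg/--main-gpu picks the GPU used for the whole model (split-mode none) or for intermediate results and KV (split-mode row). A minimal sketch of driving the same fields from code; the values are illustrative only:

gpt_params params;                           // from common.h
params.n_gpu_layers    = 99;                 // offload as many layers as fit
params.split_mode      = LLAMA_SPLIT_LAYER;  // or LLAMA_SPLIT_NONE / LLAMA_SPLIT_ROW
params.main_gpu        = 0;
params.tensor_split[0] = 3.0f;               // proportions across devices, e.g. 3,1
params.tensor_split[1] = 1.0f;

llama_model_params mparams = llama_model_params_from_gpt_params(params);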
@@ -88,7 +88,10 @@ int main(int argc, char ** argv) {

     llama_model_params model_params = llama_model_default_params();

+    const std::vector<float> t_split (LLAMA_MAX_DEVICES, 0.0f);
+
     model_params.n_gpu_layers = n_gpu_layers;
+    model_params.tensor_split = t_split.data();

     llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
@@ -31,6 +31,10 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif

+#if defined(GGML_USE_KOMPUTE)
+#include "ggml-kompute.h"
+#endif
+
 static llama_context           ** g_ctx;
 static llama_model             ** g_model;
 static gpt_params               * g_params;

@@ -182,6 +186,10 @@ int main(int argc, char ** argv) {
     g_model = &model;
     g_ctx = &ctx;

+#if defined(GGML_USE_KOMPUTE)
+    ggml_vk_init_device(0, "gpu");
+#endif
+
     // load the model and apply lora adapter, if any
     LOG("%s: load the model and apply lora adapter, if any\n", __func__);
     std::tie(model, ctx) = llama_init_from_gpt_params(params);
ggml-alloc.c (12 changes)

@@ -229,6 +229,7 @@ void ggml_tallocr_reset(ggml_tallocr_t alloc) {
         alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
     } else {
         alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset;
+        ggml_backend_buffer_reset(alloc->buffer);
     }
 }

@@ -779,10 +780,21 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte

     if (nbytes == 0) {
         // all the tensors in the context are already allocated
+#ifndef NDEBUG
+        fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__);
+#endif
         return NULL;
     }

     ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, nbytes);
+    if (buffer == NULL) {
+        // failed to allocate buffer
+#ifndef NDEBUG
+        fprintf(stderr, "%s: failed to allocate buffer\n", __func__);
+#endif
+        return NULL;
+    }
+
     ggml_tallocr_t tallocr = ggml_tallocr_new_from_buffer(buffer);

     for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
@@ -16,9 +16,10 @@ extern "C" {
     typedef void * ggml_backend_buffer_type_context_t;

     struct ggml_backend_buffer_type_i {
+        const char *          (*get_name)        (ggml_backend_buffer_type_t buft);
         ggml_backend_buffer_t (*alloc_buffer)    (ggml_backend_buffer_type_t buft, size_t size);
         size_t                (*get_alignment)   (ggml_backend_buffer_type_t buft); // tensor alignment
-        size_t                (*get_alloc_size)  (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
+        size_t                (*get_alloc_size)  (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
         bool                  (*supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
         // check if tensor data is in host memory
         // should be equivalent to supports_backend(buft, ggml_backend_cpu_init())

@@ -34,16 +35,17 @@ extern "C" {
     typedef void * ggml_backend_buffer_context_t;

     struct ggml_backend_buffer_i {
-        void   (*free_buffer)    (ggml_backend_buffer_t buffer);
-        //void (*reset)          (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+        const char * (*get_name)   (ggml_backend_buffer_t buffer);
+        void         (*free_buffer)(ggml_backend_buffer_t buffer);
         void * (*get_base)       (ggml_backend_buffer_t buffer);
         void   (*init_tensor)    (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
         void   (*set_tensor)     (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
         void   (*get_tensor)     (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
         // (optional) copy tensor between different buffer-type, allow for single-copy tranfers
-        void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
-        void (*cpy_tensor_to)  (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*cpy_tensor_to)  (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst);
         void (*clear)          (ggml_backend_buffer_t buffer, uint8_t value);
+        void (*reset)          (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
     };

     struct ggml_backend_buffer {

@@ -51,6 +53,7 @@ extern "C" {
         ggml_backend_buffer_type_t    buft;
         ggml_backend_buffer_context_t context;
         size_t size;
+        enum ggml_backend_buffer_usage usage;
     };

     ggml_backend_buffer_t ggml_backend_buffer_init(

@@ -79,13 +82,13 @@ extern "C" {
         void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);

         // (optional) asynchroneous tensor copy
-        void (*cpy_tensor_from_async)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
-        void (*cpy_tensor_to_async)  (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*cpy_tensor_from_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*cpy_tensor_to_async)  (ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst);

         void (*synchronize)(ggml_backend_t backend);

         // compute graph with a plan
-        ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+        ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
         void                      (*graph_plan_free)   (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
         void                      (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
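With these interface changes, every buffer type and buffer now reports a name, buffers carry a usage field, and reset becomes an optional hook at the end of ggml_backend_buffer_i. A hedged sketch of what an out-of-tree backend would add for the new get_name slots (the "MyBackend" name and function names are placeholders, modeled on the CPU implementations in ggml-backend.c below):

static const char * my_backend_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return "MyBackend";
    GGML_UNUSED(buft);
}

static const char * my_backend_buffer_name(ggml_backend_buffer_t buffer) {
    return "MyBackend";
    GGML_UNUSED(buffer);
}
// these go in the .get_name slots of the backend's ggml_backend_buffer_type_i and
// ggml_backend_buffer_i tables; .reset may stay NULL when there is no extra state to clear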
ggml-backend.c (483 changes)

@@ -15,6 +15,10 @@

 // backend buffer type

+const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
+    return buft->iface.get_name(buft);
+}
+
 ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
     return buft->iface.alloc_buffer(buft, size);
 }

@@ -58,11 +62,16 @@ ggml_backend_buffer_t ggml_backend_buffer_init(
         /* .buft      = */ buft,
         /* .context   = */ context,
         /* .size      = */ size,
+        /* .usage     = */ GGML_BACKEND_BUFFER_USAGE_ANY
     };

     return buffer;
 }

+const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
+    return buffer->iface.get_name(buffer);
+}
+
 void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
     if (buffer == NULL) {
         return;

@@ -94,11 +103,11 @@ void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_t
 }

 size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer) {
-    return ggml_backend_buft_get_alignment(ggml_backend_buffer_type(buffer));
+    return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
 }

 size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
-    return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type(buffer), tensor);
+    return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
 }

 void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {

@@ -106,13 +115,23 @@ void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
 }

 bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
-    return ggml_backend_buft_is_host(ggml_backend_buffer_type(buffer));
+    return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
 }

-ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer) {
+void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
+    buffer->usage = usage;
+}
+
+ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
     return buffer->buft;
 }

+void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
+    if (buffer->iface.reset) {
+        buffer->iface.reset(buffer);
+    }
+}
+
 // backend

 const char * ggml_backend_name(ggml_backend_t backend) {

@@ -295,6 +314,12 @@ static void ggml_backend_registry_init(void) {
     extern ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);
     ggml_backend_register("Metal", ggml_backend_reg_metal_init, ggml_backend_metal_buffer_type(), NULL);
 #endif
+
+#ifdef GGML_USE_KOMPUTE
+    extern ggml_backend_t ggml_backend_reg_kompute_init(const char * params, void * user_data);
+    extern ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(void);
+    ggml_backend_register("Kompute", ggml_backend_reg_kompute_init, ggml_backend_kompute_buffer_type(), NULL);
+#endif
 }

 void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data) {
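The buffer API additions above (ggml_backend_buft_name, ggml_backend_buffer_name, ggml_backend_buffer_get_type, ggml_backend_buffer_set_usage, ggml_backend_buffer_reset) give buffers a name, a usage tag and an optional reset hook. A minimal, hedged sketch of how a caller might tag a freshly allocated weights buffer so that the scheduler (see the GGML_BACKEND_BUFFER_USAGE_WEIGHTS check in sched_backend_from_cur further down) keeps ops on the backend that holds the weights; ctx and buft are placeholders for the caller's context and buffer type:

ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
if (buf != NULL) {
    ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
    fprintf(stderr, "allocated %s buffer for weights\n", ggml_backend_buffer_name(buf));
}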
@@ -392,6 +417,12 @@ ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size) {

 // backend CPU

+static const char * ggml_backend_cpu_buffer_name(ggml_backend_buffer_t buffer) {
+    return "CPU";
+
+    GGML_UNUSED(buffer);
+}
+
 static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
     return (void *)buffer->context;
 }

@@ -412,13 +443,13 @@ static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, con
     GGML_UNUSED(buffer);
 }

-static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
+static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
     ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));

     GGML_UNUSED(buffer);
 }

-static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
+static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
     ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));

     GGML_UNUSED(buffer);

@@ -429,6 +460,7 @@ static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t
 }

 static struct ggml_backend_buffer_i cpu_backend_buffer_i = {
+    /* .get_name        = */ ggml_backend_cpu_buffer_name,
     /* .free_buffer     = */ ggml_backend_cpu_buffer_free_buffer,
     /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
     /* .init_tensor     = */ NULL, // no initialization required

@@ -437,10 +469,12 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i = {
     /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from,
     /* .cpy_tensor_to   = */ ggml_backend_cpu_buffer_cpy_tensor_to,
     /* .clear           = */ ggml_backend_cpu_buffer_clear,
+    /* .reset           = */ NULL,
 };

 // for buffers from ptr, free is not called
 static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
+    /* .get_name        = */ ggml_backend_cpu_buffer_name,
     /* .free_buffer     = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
     /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
     /* .init_tensor     = */ NULL, // no initialization required

@@ -449,10 +483,17 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
     /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from,
     /* .cpy_tensor_to   = */ ggml_backend_cpu_buffer_cpy_tensor_to,
     /* .clear           = */ ggml_backend_cpu_buffer_clear,
+    /* .reset           = */ NULL,
 };

 static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512

+static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    return "CPU";
+
+    GGML_UNUSED(buft);
+}
+
 static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
     size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
     void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?

@@ -483,6 +524,7 @@ static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft
 ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
     static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
         /* .iface = */ {
+            /* .get_name         = */ ggml_backend_cpu_buffer_type_get_name,
             /* .alloc_buffer     = */ ggml_backend_cpu_buffer_type_alloc_buffer,
             /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
             /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes

@@ -501,6 +543,18 @@ ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {

 #include <hbwmalloc.h>

+static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    return "CPU_HBM";
+
+    GGML_UNUSED(buft);
+}
+
+static const char * ggml_backend_cpu_hbm_buffer_get_name(ggml_backend_buffer_t buf) {
+    return "CPU_HBM";
+
+    GGML_UNUSED(buf);
+}
+
 static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     hbw_free(buffer->context);
 }

@@ -514,17 +568,18 @@ static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_
         return NULL;
     }

-    // FIXME: this is a hack to avoid having to implement a new buffer type
     ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
     buffer->buft = buft;
+    buffer->iface.get_name = ggml_backend_cpu_hbm_buffer_get_name;
     buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer;

     return buffer;
 }

-ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type() {
+ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
     static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
         /* .iface = */ {
+            /* .get_name         = */ ggml_backend_cpu_hbm_buffer_type_get_name,
             /* .alloc_buffer     = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
             /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
             /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes

@@ -568,7 +623,7 @@ struct ggml_backend_plan_cpu {
     struct ggml_cgraph cgraph;
 };

-static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, const struct ggml_cgraph * cgraph) {
     struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;

     struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu));

@@ -661,7 +716,7 @@ ggml_backend_t ggml_backend_cpu_init(void) {
 }

 bool ggml_backend_is_cpu(ggml_backend_t backend) {
-    return backend->iface.get_name == ggml_backend_cpu_name;
+    return backend && backend->iface.get_name == ggml_backend_cpu_name;
 }

 void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {

@@ -685,7 +740,7 @@ static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user

 // scheduler

-#define GGML_MAX_BACKENDS 4
+#define GGML_MAX_BACKENDS 16
 #define GGML_MAX_SPLITS 256
 #define GGML_MAX_SPLIT_INPUTS 16
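Two small but notable changes here: GGML_MAX_BACKENDS goes from 4 to 16, and every CPU buffer and buffer type now reports a human-readable name. A hedged sketch of using the new name accessors for debug logging, where buf stands in for any ggml_backend_buffer_t:

fprintf(stderr, "buffer '%s' of type '%s', %zu bytes\n",
        ggml_backend_buffer_name(buf),
        ggml_backend_buft_name(ggml_backend_buffer_get_type(buf)),
        ggml_backend_buffer_get_size(buf));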
@@ -695,9 +750,16 @@ struct ggml_backend_sched_split {
     int i_end;
     struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS];
     int n_inputs;
+    // graph view of this split
     struct ggml_cgraph graph;
 };

+// TODO: group all the hash values into a single struct for clarity
+//struct sched_hash_value {
+//    ggml_tallocr_t tallocr;
+//    struct ggml_tensor * copies[GGML_MAX_BACKENDS];
+//};
+
 struct ggml_backend_sched {
     int n_backends;
     ggml_backend_t backends[GGML_MAX_BACKENDS];

@@ -705,11 +767,15 @@ struct ggml_backend_sched {

     ggml_gallocr_t galloc;

+    // hash keys of the nodes in the graph
     struct ggml_hash_set hash_set;
-    ggml_tallocr_t * node_talloc; // [hash_set.size]
-    struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // [hash_set.size][GGML_MAX_BACKENDS]
+    // hash values (arrays of [hash_set.size])
+    ggml_tallocr_t * node_talloc; // tallocr assigned to each node (indirectly this is the backend)
+    struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // copies of each node for each destination backend

+    // copy of the graph with modified inputs
     struct ggml_cgraph * graph;
+
     struct ggml_backend_sched_split splits[GGML_MAX_SPLITS];
     int n_splits;

@@ -777,7 +843,7 @@ static ggml_backend_t get_allocr_backend(ggml_backend_sched_t sched, ggml_talloc
 }

 #if 0
-static char causes[GGML_DEFAULT_GRAPH_SIZE*8 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove
+static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove
 #define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
 #define GET_CAUSE(node) causes[hash_id(node)]
 #else

@@ -790,6 +856,7 @@ static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct
     // if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there
     // ie. kv cache updates
     // note that this doesn't allow fallback to CPU. need to add output tensors to the splits to copy the data back to the original backend.
+
     // dst
     ggml_backend_t cur_backend = get_buffer_backend(sched, node->buffer);
     if (cur_backend != NULL) {

@@ -804,7 +871,6 @@ static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct
     }

     // src
-    int cur_prio = INT_MAX;
     size_t cur_size = 0;

     for (int i = 0; i < GGML_MAX_SRC; i++) {

@@ -812,16 +878,20 @@ static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct
         if (src == NULL) {
             break;
         }
         ggml_backend_t src_backend = get_buffer_backend(sched, src->buffer);
-        if (src_backend != NULL) {
-            int src_prio = sched_backend_prio(sched, src_backend);
-            size_t src_size = ggml_nbytes(src);
-            if (src_prio < cur_prio && src_size >= cur_size) {
-                cur_prio = src_prio;
-                cur_size = src_size;
-                cur_backend = src_backend;
-                SET_CAUSE(node, "1.src%d", i);
-            }
+        if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
+            // operations with weights are always on the same backend as the weights
+            cur_backend = src_backend;
+            SET_CAUSE(node, "1.wgt%d", i);
+            break;
+        }
+
+        size_t src_size = ggml_nbytes(src);
+        if (src_size >= cur_size) {
+            cur_size = src_size;
+            cur_backend = src_backend;
+            SET_CAUSE(node, "1.src%d", i);
         }
     }
     return cur_backend;

@@ -857,7 +927,7 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra
     }
     ggml_tallocr_t node_allocr = node_allocr(node);
     ggml_backend_t node_backend = node_allocr ? get_allocr_backend(sched, node_allocr) : NULL; // FIXME:
-    fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name,
+    fprintf(stderr, "node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
         fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", GET_CAUSE(node));
     for (int j = 0; j < GGML_MAX_SRC; j++) {
         struct ggml_tensor * src = node->src[j];

@@ -866,7 +936,7 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra
         }
         ggml_tallocr_t src_allocr = node_allocr(src);
         ggml_backend_t src_backend = src_allocr ? get_allocr_backend(sched, src_allocr) : NULL;
-        fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name,
+        fprintf(stderr, " %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
             fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
     }
     fprintf(stderr, "\n");

@@ -882,14 +952,16 @@ static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, co
     return dup;
 }

+//#define DEBUG_PASS1
+//#define DEBUG_PASS2
+//#define DEBUG_PASS3
+//#define DEBUG_PASS4
+
 // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
 // TODO: merge passes
 static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
-    // reset state
-    size_t hash_size = sched->hash_set.size;
-    memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
-    memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
-    memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);
+    // reset splits
     sched->n_splits = 0;

     struct ggml_init_params params = {

@@ -898,11 +970,13 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
         /* .no_alloc = */ true
     };

-    if (sched->ctx != NULL) {
-        ggml_free(sched->ctx);
-    }
+    ggml_free(sched->ctx);

     sched->ctx = ggml_init(params);
+    if (sched->ctx == NULL) {
+        fprintf(stderr, "%s: failed to initialize context\n", __func__);
+        GGML_ASSERT(false);
+    }

     // pass 1: assign backends to ops with allocated inputs
     for (int i = 0; i < graph->n_leafs; i++) {

@@ -931,45 +1005,91 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
             node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend);
         }
     }
-    //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#ifdef DEBUG_PASS1
+    fprintf(stderr, "PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#endif

     // pass 2: assign backends to ops from current assignments
-    // TODO:
-    //  - reuse sched_backend_from_cur
-    for (int i = 0; i < graph->n_nodes; i++) {
-        struct ggml_tensor * node = graph->nodes[i];
-        ggml_tallocr_t node_allocr = node_allocr(node);
-        if (node_allocr == NULL) {
-            int cur_prio = INT_MAX;
-            size_t cur_size = 0;
-            for (int j = 0; j < GGML_MAX_SRC; j++) {
-                struct ggml_tensor * src = node->src[j];
-                if (src == NULL) {
-                    break;
-                }
-                ggml_tallocr_t src_allocr = node_allocr(src);
-                if (src_allocr != NULL) {
-                    int src_prio = sched_allocr_prio(sched, src_allocr);
-                    size_t src_size = ggml_nbytes(src);
-                    if (src_prio < cur_prio && src_size >= cur_size) {
-                        cur_prio = src_prio;
-                        cur_size = src_size;
-                        node_allocr = src_allocr;
-                        SET_CAUSE(node, "2.src%d", j);
-                    }
-                }
-            }
-            if (node_allocr != NULL) {
-                node_allocr(node) = node_allocr;
-            }
-        }
-    }
-    //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
-
-    // pass 3: assign backends to remaining src from dst (should only be leafs)
+    // start from the end and assign the same backend to previous ops
+    // expand gpu backends (i.e. non last prio) up and down, ignoring cpu
+    // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
+
+    // pass 2.1 expand gpu up
+    {
+        ggml_tallocr_t cur_allocr = NULL;
+        for (int i = graph->n_nodes - 1; i >= 0; i--) {
+            struct ggml_tensor * node = graph->nodes[i];
+            if (ggml_is_view_op(node->op)) {
+                continue;
+            }
+            ggml_tallocr_t node_allocr = node_allocr(node);
+            if (node_allocr != NULL) {
+                if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
+                    // skip cpu
+                    cur_allocr = NULL;
+                } else {
+                    cur_allocr = node_allocr;
+                }
+            } else {
+                node_allocr(node) = cur_allocr;
+                SET_CAUSE(node, "2.cur");
+            }
+        }
+    }
+
+    // pass 2.2 expand gpu down
+    {
+        ggml_tallocr_t cur_allocr = NULL;
+        for (int i = 0; i < graph->n_nodes; i++) {
+            struct ggml_tensor * node = graph->nodes[i];
+            if (ggml_is_view_op(node->op)) {
+                continue;
+            }
+            ggml_tallocr_t node_allocr = node_allocr(node);
+            if (node_allocr != NULL) {
+                if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
+                    // skip cpu
+                    cur_allocr = NULL;
+                } else {
+                    cur_allocr = node_allocr;
+                }
+            } else {
+                node_allocr(node) = cur_allocr;
+                SET_CAUSE(node, "2.cur");
+            }
+        }
+    }
+
+    // pass 2.3 expand rest up
+    {
+        ggml_tallocr_t cur_allocr = NULL;
+        for (int i = graph->n_nodes - 1; i >= 0; i--) {
+            struct ggml_tensor * node = graph->nodes[i];
+            if (ggml_is_view_op(node->op)) {
+                continue;
+            }
+            ggml_tallocr_t node_allocr = node_allocr(node);
+            if (node_allocr != NULL) {
+                cur_allocr = node_allocr;
+            } else {
+                node_allocr(node) = cur_allocr;
+                SET_CAUSE(node, "2.cur");
+            }
+        }
+    }
+#ifdef DEBUG_PASS2
+    fprintf(stderr, "PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#endif
+
+    // pass 3: assign backends to remaining src from dst and view_src
     for (int i = 0; i < graph->n_nodes; i++) {
         struct ggml_tensor * node = graph->nodes[i];
-        ggml_tallocr_t node_allocr = node_allocr(node);
+        ggml_tallocr_t cur_allocr = node_allocr(node);
+        if (ggml_is_view_op(node->op) && cur_allocr == NULL) {
+            cur_allocr = node_allocr(node) = node_allocr(node->view_src);
+            SET_CAUSE(node, "3.vsrc");
+        }
         for (int j = 0; j < GGML_MAX_SRC; j++) {
             struct ggml_tensor * src = node->src[j];
             if (src == NULL) {

@@ -977,81 +1097,100 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
             }
         }
         ggml_tallocr_t src_allocr = node_allocr(src);
         if (src_allocr == NULL) {
-            node_allocr(src) = node_allocr;
+            if (src->view_src != NULL) {
+                // views are always on the same backend as the source
|
node_allocr(src) = node_allocr(src->view_src);
|
||||||
|
} else {
|
||||||
|
node_allocr(src) = cur_allocr;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
//printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
|
#ifdef DEBUG_PASS3
|
||||||
|
fprintf(stderr, "PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
|
||||||
|
#endif
|
||||||
|
|
||||||
// pass 4: split graph, find tensors that need to be copied
|
// pass 4: split graph, find tensors that need to be copied
|
||||||
// TODO:
|
{
|
||||||
// - when switching from a less preferred backend to a more preferred backend, check if it is possible to move the switch to an earlier point for the same cost
|
int cur_split = 0;
|
||||||
// find first backend
|
for (int i = 0; i < graph->n_nodes; i++) {
|
||||||
int cur_split = 0;
|
struct ggml_tensor * node = graph->nodes[i];
|
||||||
for (int i = 0; i < graph->n_nodes; i++) {
|
if (node->view_src == NULL) {
|
||||||
struct ggml_tensor * node = graph->nodes[i];
|
sched->splits[0].tallocr = node_allocr(node);
|
||||||
if (node->view_src == NULL) {
|
|
||||||
sched->splits[0].tallocr = node_allocr(node);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sched->splits[0].i_start = 0;
|
|
||||||
sched->splits[0].n_inputs = 0;
|
|
||||||
memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
|
|
||||||
ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
|
|
||||||
size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
|
|
||||||
for (int i = 0; i < graph->n_nodes; i++) {
|
|
||||||
struct ggml_tensor * node = graph->nodes[i];
|
|
||||||
|
|
||||||
if (ggml_is_view_op(node->op)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
ggml_tallocr_t node_allocr = node_allocr(node);
|
|
||||||
|
|
||||||
if (node_allocr != cur_allocr) {
|
|
||||||
sched->splits[cur_split].i_end = i;
|
|
||||||
cur_split++;
|
|
||||||
GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
|
|
||||||
sched->splits[cur_split].tallocr = node_allocr;
|
|
||||||
sched->splits[cur_split].i_start = i;
|
|
||||||
sched->splits[cur_split].n_inputs = 0;
|
|
||||||
memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK
|
|
||||||
cur_allocr = node_allocr;
|
|
||||||
cur_backend_id = sched_allocr_prio(sched, cur_allocr);
|
|
||||||
}
|
|
||||||
|
|
||||||
// find inputs that are not on the same backend
|
|
||||||
for (int j = 0; j < GGML_MAX_SRC; j++) {
|
|
||||||
struct ggml_tensor * src = node->src[j];
|
|
||||||
if (src == NULL) {
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
ggml_tallocr_t src_allocr = node_allocr(src);
|
}
|
||||||
if (src_allocr != node_allocr) {
|
sched->splits[0].i_start = 0;
|
||||||
int n_inputs = sched->splits[cur_split].n_inputs++;
|
sched->splits[0].n_inputs = 0;
|
||||||
GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
|
memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
|
||||||
sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src;
|
ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
|
||||||
|
size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
|
||||||
|
for (int i = 0; i < graph->n_nodes; i++) {
|
||||||
|
struct ggml_tensor * node = graph->nodes[i];
|
||||||
|
|
||||||
// create copies
|
if (ggml_is_view_op(node->op)) {
|
||||||
size_t id = hash_id(src);
|
continue;
|
||||||
if (sched->node_copies[id][cur_backend_id] == NULL) {
|
}
|
||||||
struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
|
|
||||||
sched->node_copies[id][cur_backend_id] = tensor_copy;
|
ggml_tallocr_t node_allocr = node_allocr(node);
|
||||||
node_allocr(tensor_copy) = cur_allocr;
|
|
||||||
ggml_backend_t backend = get_allocr_backend(sched, cur_allocr);
|
if (node_allocr != cur_allocr) {
|
||||||
ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
|
sched->splits[cur_split].i_end = i;
|
||||||
|
cur_split++;
|
||||||
|
GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
|
||||||
|
sched->splits[cur_split].tallocr = node_allocr;
|
||||||
|
sched->splits[cur_split].i_start = i;
|
||||||
|
sched->splits[cur_split].n_inputs = 0;
|
||||||
|
memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK
|
||||||
|
cur_allocr = node_allocr;
|
||||||
|
cur_backend_id = sched_allocr_prio(sched, cur_allocr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// find inputs that are not on the same backend
|
||||||
|
for (int j = 0; j < GGML_MAX_SRC; j++) {
|
||||||
|
struct ggml_tensor * src = node->src[j];
|
||||||
|
if (src == NULL) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ggml_tallocr_t src_allocr = node_allocr(src);
|
||||||
|
if (src_allocr != node_allocr) {
|
||||||
|
// check if the input is already in the split
|
||||||
|
bool found = false;
|
||||||
|
for (int k = 0; k < sched->splits[cur_split].n_inputs; k++) {
|
||||||
|
if (sched->splits[cur_split].inputs[k] == src) {
|
||||||
|
found = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!found) {
|
||||||
|
int n_inputs = sched->splits[cur_split].n_inputs++;
|
||||||
|
//printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr)));
|
||||||
|
GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
|
||||||
|
sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src;
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a copy of the input in the split's backend
|
||||||
|
size_t id = hash_id(src);
|
||||||
|
if (sched->node_copies[id][cur_backend_id] == NULL) {
|
||||||
|
struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
|
||||||
|
sched->node_copies[id][cur_backend_id] = tensor_copy;
|
||||||
|
node_allocr(tensor_copy) = cur_allocr;
|
||||||
|
ggml_backend_t backend = get_allocr_backend(sched, cur_allocr);
|
||||||
|
ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
|
||||||
|
}
|
||||||
|
node->src[j] = sched->node_copies[id][cur_backend_id];
|
||||||
}
|
}
|
||||||
node->src[j] = sched->node_copies[id][cur_backend_id];
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
sched->splits[cur_split].i_end = graph->n_nodes;
|
||||||
|
sched->n_splits = cur_split + 1;
|
||||||
}
|
}
|
||||||
sched->splits[cur_split].i_end = graph->n_nodes;
|
#ifdef DEBUG_PASS4
|
||||||
sched->n_splits = cur_split + 1;
|
fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
|
||||||
|
#endif
|
||||||
|
|
||||||
//fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout);
|
#ifndef NDEBUG
|
||||||
|
|
||||||
#if 1
|
|
||||||
// sanity check: all sources should have the same backend as the node
|
// sanity check: all sources should have the same backend as the node
|
||||||
for (int i = 0; i < graph->n_nodes; i++) {
|
for (int i = 0; i < graph->n_nodes; i++) {
|
||||||
struct ggml_tensor * node = graph->nodes[i];
|
struct ggml_tensor * node = graph->nodes[i];
|
||||||
|
@ -1059,6 +1198,11 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
|
||||||
if (node_allocr == NULL) {
|
if (node_allocr == NULL) {
|
||||||
fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
|
fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
|
||||||
}
|
}
|
||||||
|
if (node->view_src != NULL && node_allocr != node_allocr(node->view_src)) {
|
||||||
|
fprintf(stderr, "!!!!!!! %s has backend %s, view_src %s has backend %s\n",
|
||||||
|
node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
|
||||||
|
node->view_src->name, node_allocr(node->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(node->view_src))) : "NULL");
|
||||||
|
}
|
||||||
for (int j = 0; j < GGML_MAX_SRC; j++) {
|
for (int j = 0; j < GGML_MAX_SRC; j++) {
|
||||||
struct ggml_tensor * src = node->src[j];
|
struct ggml_tensor * src = node->src[j];
|
||||||
if (src == NULL) {
|
if (src == NULL) {
|
||||||
|
@ -1070,8 +1214,14 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
|
||||||
node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
|
node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
|
||||||
j, src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL");
|
j, src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL");
|
||||||
}
|
}
|
||||||
|
if (src->view_src != NULL && src_allocr != node_allocr(src->view_src)) {
|
||||||
|
fprintf(stderr, "!!!!!!! [src] %s has backend %s, view_src %s has backend %s\n",
|
||||||
|
src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL",
|
||||||
|
src->view_src->name, node_allocr(src->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(src->view_src))) : "NULL");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
fflush(stderr);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// create copies of the graph for each split
|
// create copies of the graph for each split
|
||||||
|
@ -1085,6 +1235,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
|
||||||
for (int j = 0; j < split->n_inputs; j++) {
|
for (int j = 0; j < split->n_inputs; j++) {
|
||||||
struct ggml_tensor * input = split->inputs[j];
|
struct ggml_tensor * input = split->inputs[j];
|
||||||
struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];
|
struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];
|
||||||
|
// add a dependency to the input source so that it is not freed before the copy is done
|
||||||
input_cpy->src[0] = input;
|
input_cpy->src[0] = input;
|
||||||
graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
|
graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
|
||||||
}
|
}
|
||||||
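To make the pass structure above easier to follow, here is a simplified, self-contained sketch (an illustration only, not code from this change) of how passes 2.1 and 2.2 propagate an existing assignment across unassigned nodes in both directions; `assignment` and `cpu_prio` are hypothetical stand-ins for the node_allocr/priority bookkeeping used in the real code.

    // Illustration only: propagate backend assignments the way passes 2.1/2.2 do,
    // skipping the CPU backend (assumed to be the last priority).
    #include <vector>

    static void expand_assignments(std::vector<int> & assignment, int cpu_prio) {
        int cur = -1; // -1 = nothing to propagate yet
        // expand up: walk from the last node towards the first
        for (int i = (int) assignment.size() - 1; i >= 0; i--) {
            if (assignment[i] != -1) {
                cur = (assignment[i] == cpu_prio) ? -1 : assignment[i]; // skip cpu
            } else if (cur != -1) {
                assignment[i] = cur;
            }
        }
        cur = -1;
        // expand down: walk from the first node towards the last
        for (int i = 0; i < (int) assignment.size(); i++) {
            if (assignment[i] != -1) {
                cur = (assignment[i] == cpu_prio) ? -1 : assignment[i];
            } else if (cur != -1) {
                assignment[i] = cur;
            }
        }
    }

Pass 2.3 then repeats the upward sweep without skipping the CPU backend, so any nodes still unassigned fall back to whatever assignment precedes them.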
@@ -1121,19 +1272,20 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {
             struct ggml_tensor * input = split->inputs[j];
             struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_backend_prio(sched, split_backend)];
             if (input->buffer == NULL) {
+                GGML_ASSERT(false);
                 if (input->view_src == NULL) {
                     fprintf(stderr, "input %s has no buffer and no view_src\n", input->name);
-                    exit(1);
+                    GGML_ASSERT(false);
                 }
                 // FIXME: may need to use the sched buffer instead
                 ggml_backend_view_init(input->view_src->buffer, input);
             }
             if (input_cpy->buffer == NULL) {
                 fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name);
-                exit(1);
+                GGML_ASSERT(false);
             }
-            //GGML_ASSERT(input->buffer->backend != input_cpy->buffer->backend);
-            //GGML_ASSERT(input_cpy->buffer->backend == split_backend);
+            // TODO: avoid this copy if it was already copied in a previous split, and the input didn't change
+            // this is important to avoid copying constants such as KQ_mask and inp_pos multiple times
             ggml_backend_tensor_copy(input, input_cpy);
         }
         // ggml_backend_synchronize(split_backend);
@@ -1168,13 +1320,23 @@ static void sched_reset(ggml_backend_sched_t sched) {
     for (int i = 0; i < sched->n_backends; i++) {
         ggml_tallocr_reset(sched->tallocs[i]);
     }
+    // reset state for the next run
+    size_t hash_size = sched->hash_set.size;
+    memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
+    memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
+    memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);
 }

-ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends) {
+ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends, size_t graph_size) {
+    GGML_ASSERT(n_backends > 0);
     GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS);

-    struct ggml_backend_sched * sched = malloc(sizeof(struct ggml_backend_sched));
-    memset(sched, 0, sizeof(struct ggml_backend_sched));
+    struct ggml_backend_sched * sched = calloc(sizeof(struct ggml_backend_sched), 1);
+
+    // initialize hash table
+    sched->hash_set = ggml_hash_set_new(graph_size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+    sched->node_talloc = calloc(sizeof(sched->node_talloc[0]) * sched->hash_set.size, 1);
+    sched->node_copies = calloc(sizeof(sched->node_copies[0]) * sched->hash_set.size, 1);

     sched->n_backends = n_backends;
     for (int i = 0; i < n_backends; i++) {
@@ -1199,6 +1361,7 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) {
         ggml_tallocr_free(sched->tallocs[i]);
     }
     ggml_gallocr_free(sched->galloc);
+    ggml_free(sched->ctx);
     free(sched->hash_set.keys);
     free(sched->node_talloc);
     free(sched->node_copies);
@@ -1206,12 +1369,7 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) {
 }

 void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
-    // initialize hash tables
-    size_t hash_size = measure_graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS;
-    sched->hash_set.size = hash_size;
-    sched->hash_set.keys = malloc(sizeof(sched->hash_set.keys[0]) * hash_size);
-    sched->node_talloc = malloc(sizeof(sched->node_talloc[0]) * hash_size);
-    sched->node_copies = malloc(sizeof(sched->node_copies[0]) * hash_size);
+    GGML_ASSERT(ggml_tallocr_is_measure(sched->tallocs[0])); // can only be initialized once

     sched_split_graph(sched, measure_graph);
     sched_alloc_splits(sched);
@@ -1227,7 +1385,7 @@ void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgr
 }

 void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
-    GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+    GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);

     sched_split_graph(sched, graph);
     sched_alloc_splits(sched);
@@ -1235,13 +1393,19 @@ void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cg
     sched_reset(sched);
 }

+int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
+    return sched->n_splits;
+}
+
 ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) {
     int backend_index = sched_backend_prio(sched, backend);
+    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
     return sched->tallocs[backend_index];
 }

 ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) {
     int backend_index = sched_backend_prio(sched, backend);
+    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
     return ggml_tallocr_get_buffer(sched->tallocs[backend_index]);
 }

@@ -1252,9 +1416,10 @@ void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml
 }

 // utils

 void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
     GGML_ASSERT(tensor->buffer == NULL);
-    //GGML_ASSERT(tensor->data == NULL); // views of pre-allocted tensors may have the data set, but still need to be initialized
+    //GGML_ASSERT(tensor->data == NULL); // views of pre-allocated tensors may have the data set in ggml_new_tensor, but still need to be initialized by the backend
     GGML_ASSERT(tensor->view_src != NULL);
     GGML_ASSERT(tensor->view_src->buffer != NULL);
     GGML_ASSERT(tensor->view_src->data != NULL);
@@ -1320,6 +1485,7 @@ static void graph_init_tensor(struct ggml_hash_set hash_set, struct ggml_tensor

     struct ggml_tensor * dst = node_copies[id];
     if (dst->view_src != NULL) {
+        graph_init_tensor(hash_set, node_copies, node_init, src->view_src);
         ggml_backend_view_init(dst->view_src->buffer, dst);
     }
     else {
@@ -1353,6 +1519,21 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
     struct ggml_context * ctx_allocated = ggml_init(params);
     struct ggml_context * ctx_unallocated = ggml_init(params);

+    if (ctx_allocated == NULL || ctx_unallocated == NULL) {
+        fprintf(stderr, "failed to allocate context for graph copy\n");
+        free(hash_set.keys);
+        free(node_copies);
+        free(node_init);
+        ggml_free(ctx_allocated);
+        ggml_free(ctx_unallocated);
+        return (struct ggml_backend_graph_copy) {
+            /* .buffer          = */ NULL,
+            /* .ctx_allocated   = */ NULL,
+            /* .ctx_unallocated = */ NULL,
+            /* .graph           = */ NULL,
+        };
+    }
+
     // dup nodes
     for (int i = 0; i < graph->n_nodes; i++) {
         struct ggml_tensor * node = graph->nodes[i];
@@ -1361,6 +1542,20 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s

     // allocate nodes
     ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
+    if (buffer == NULL) {
+        fprintf(stderr, "failed to allocate buffer for graph copy\n");
+        free(hash_set.keys);
+        free(node_copies);
+        free(node_init);
+        ggml_free(ctx_allocated);
+        ggml_free(ctx_unallocated);
+        return (struct ggml_backend_graph_copy) {
+            /* .buffer          = */ NULL,
+            /* .ctx_allocated   = */ NULL,
+            /* .ctx_unallocated = */ NULL,
+            /* .graph           = */ NULL,
+        };
+    }

     //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);

@@ -1397,8 +1592,12 @@ void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) {
     ggml_free(copy.ctx_unallocated);
 }

-void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
+bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
     struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
+    if (copy.buffer == NULL) {
+        return false;
+    }

     struct ggml_cgraph * g1 = graph;
     struct ggml_cgraph * g2 = copy.graph;

@@ -1428,4 +1627,6 @@ void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t
     }

     ggml_backend_graph_copy_free(copy);
+
+    return true;
 }
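Taken together, the scheduler changes above alter the public entry points: the graph size is passed at creation time, a split counter is exposed, and the backend-comparison helper now reports failure. A rough usage sketch under those signatures follows; the backend handles and the `build_graph()` callback are placeholders, not part of this change, and 2048 is just an example graph size.

    // Hypothetical caller of the scheduler API shown above.
    #include <cstdio>

    static void run_with_sched(ggml_backend_t gpu, ggml_backend_t cpu,
                               struct ggml_cgraph * (*build_graph)(void)) {
        ggml_backend_t backends[2] = { gpu, cpu };
        // the graph size now has to be passed explicitly
        ggml_backend_sched_t sched = ggml_backend_sched_new(backends, 2, 2048);

        // reserve backend buffers once, using a worst-case measure graph
        ggml_backend_sched_init_measure(sched, build_graph());

        struct ggml_cgraph * graph = build_graph();
        ggml_backend_sched_graph_compute(sched, graph);
        std::fprintf(stderr, "splits used: %d\n", ggml_backend_sched_get_n_splits(sched));

        ggml_backend_sched_free(sched);
    }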
@@ -17,22 +17,32 @@ extern "C" {
     //

     // buffer type
-    GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size);
-    GGML_API size_t                ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
-    GGML_API size_t                ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
-    GGML_API bool                  ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
-    GGML_API bool                  ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
+    GGML_API const char *          ggml_backend_buft_name            (ggml_backend_buffer_type_t buft);
+    GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer    (ggml_backend_buffer_type_t buft, size_t size);
+    GGML_API size_t                ggml_backend_buft_get_alignment   (ggml_backend_buffer_type_t buft);
+    GGML_API size_t                ggml_backend_buft_get_alloc_size  (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
+    GGML_API bool                  ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
+    GGML_API bool                  ggml_backend_buft_is_host         (ggml_backend_buffer_type_t buft);

     // buffer
-    GGML_API void   ggml_backend_buffer_free          (ggml_backend_buffer_t buffer);
-    GGML_API void * ggml_backend_buffer_get_base      (ggml_backend_buffer_t buffer);
-    GGML_API size_t ggml_backend_buffer_get_size      (ggml_backend_buffer_t buffer);
-    GGML_API void   ggml_backend_buffer_init_tensor   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
-    GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
-    GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
-    GGML_API void   ggml_backend_buffer_clear         (ggml_backend_buffer_t buffer, uint8_t value);
-    GGML_API bool   ggml_backend_buffer_is_host       (ggml_backend_buffer_t buffer);
-    GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer);
+    enum ggml_backend_buffer_usage {
+        GGML_BACKEND_BUFFER_USAGE_ANY = 0,
+        GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
+    };
+
+    GGML_API const char *               ggml_backend_buffer_name          (ggml_backend_buffer_t buffer);
+    GGML_API void                       ggml_backend_buffer_free          (ggml_backend_buffer_t buffer);
+    GGML_API void *                     ggml_backend_buffer_get_base      (ggml_backend_buffer_t buffer);
+    GGML_API size_t                     ggml_backend_buffer_get_size      (ggml_backend_buffer_t buffer);
+    GGML_API void                       ggml_backend_buffer_init_tensor   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+    GGML_API size_t                     ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
+    GGML_API size_t                     ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+    GGML_API void                       ggml_backend_buffer_clear         (ggml_backend_buffer_t buffer, uint8_t value);
+    GGML_API bool                       ggml_backend_buffer_is_host       (ggml_backend_buffer_t buffer);
+    GGML_API void                       ggml_backend_buffer_set_usage     (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
+    GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type      (ggml_backend_buffer_t buffer);
+    GGML_API void                       ggml_backend_buffer_reset         (ggml_backend_buffer_t buffer);

     //
     // Backend
@@ -140,24 +150,23 @@ extern "C" {
     typedef struct ggml_backend_sched * ggml_backend_sched_t;

     // Initialize a backend scheduler
-    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends);
+    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends, size_t graph_size);
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);

     // Initialize backend buffers from a measure graph
     GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+    // Get the number of splits of the last graph
+    GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);

     GGML_API ggml_tallocr_t        ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend);
     GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend);

     GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);

-    // Allocate a graph on the backend scheduler
+    // Allocate and compute graph on the backend scheduler
     GGML_API void ggml_backend_sched_graph_compute(
             ggml_backend_sched_t sched,
             struct ggml_cgraph * graph);

     //
     // Utils
     //
@@ -176,7 +185,7 @@ extern "C" {
     typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data);

     // Compare the output of two backends
-    GGML_API void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
+    GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);

     // Tensor initialization
     GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
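A short sketch of how the regrouped buffer-type/buffer API above might be exercised; `buft` is assumed to come from some backend (for example a CUDA or Metal buffer type), and the usage flag is only a hint whose exact effect depends on the scheduler, so this is an illustration rather than prescribed usage.

    #include <cstdio>

    static void describe_and_allocate(ggml_backend_buffer_type_t buft, size_t size) {
        // query static properties of the buffer type
        std::printf("%s: alignment %zu, host: %d\n",
                    ggml_backend_buft_name(buft),
                    ggml_backend_buft_get_alignment(buft),
                    ggml_backend_buft_is_host(buft) ? 1 : 0);

        // allocate a buffer of this type, tag it, and release it again
        ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, size);
        ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); // hint: holds weights
        ggml_backend_buffer_clear(buf, 0);
        ggml_backend_buffer_free(buf);
    }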
828  ggml-cuda.cu  (file diff suppressed because it is too large)

26  ggml-cuda.h
@@ -27,22 +27,6 @@ GGML_API void * ggml_cuda_host_malloc(size_t size);
 GGML_API void   ggml_cuda_host_free(void * ptr);

 GGML_API bool   ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API void   ggml_cuda_set_tensor_split(const float * tensor_split);
-GGML_API void   ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);
-GGML_API void   ggml_cuda_free_data(struct ggml_tensor * tensor);
-
-GGML_API void   ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
-GGML_API void   ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
-GGML_API void   ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
-
-GGML_API void   ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor);
-GGML_API void   ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset);
-GGML_API void   ggml_cuda_copy_to_device(struct ggml_tensor * tensor);
-
-GGML_API void   ggml_cuda_set_main_device(int main_device);
-GGML_API void   ggml_cuda_set_mul_mat_q(bool mul_mat_q);
-GGML_API void   ggml_cuda_set_scratch_size(size_t scratch_size);
-GGML_API void   ggml_cuda_free_scratch(void);
 GGML_API bool   ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);

 GGML_API int    ggml_cuda_get_device_count(void);
@@ -52,13 +36,17 @@ GGML_API void ggml_cuda_get_device_description(int device, char * description,
 GGML_API ggml_backend_t ggml_backend_cuda_init(int device);

 GGML_API bool ggml_backend_is_cuda(ggml_backend_t backend);
-GGML_API int  ggml_backend_cuda_get_device(ggml_backend_t backend);

 GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
-// pinned host buffer for use with CPU backend for faster copies between CPU and GPU
+// split tensor buffer that splits matrices by rows across multiple devices
+GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split);
+// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
 GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);

+GGML_API int  ggml_backend_cuda_get_device_count(void);
+GGML_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
+GGML_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
+
 #ifdef __cplusplus
 }
 #endif
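The new `ggml_backend_cuda_*` device queries declared above take over from the older per-tensor helpers that this hunk removes. A minimal enumeration sketch, assuming only the declarations shown here (the buffer size of 128 for the description string is an arbitrary choice):

    #include <cstdio>
    #include "ggml-cuda.h"

    static void list_cuda_devices(void) {
        int n = ggml_backend_cuda_get_device_count();
        for (int i = 0; i < n; i++) {
            char desc[128];
            size_t free_mem = 0, total_mem = 0;
            ggml_backend_cuda_get_device_description(i, desc, sizeof(desc));
            ggml_backend_cuda_get_device_memory(i, &free_mem, &total_mem);
            std::printf("CUDA%d: %s, %zu/%zu MiB free\n", i, desc,
                        free_mem / (1024*1024), total_mem / (1024*1024));
        }
    }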
@@ -228,6 +228,8 @@ inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
 #define GGML_HASHTABLE_FULL ((size_t)-1)
 #define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)

+struct ggml_hash_set ggml_hash_set_new(size_t size);
+
 bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key);

 // returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
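The hunk above exposes the hash-set constructor that the scheduler now uses at creation time. A tiny sketch of the intended pairing, assumed from the scheduler code earlier in this diff where the keys array is freed by the caller; `graph_size` and `some_tensor` are placeholders:

    struct ggml_hash_set set = ggml_hash_set_new(graph_size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
    bool seen = ggml_hash_contains(set, some_tensor); // membership check only, does not insert
    free(set.keys); // keys are heap-allocated, mirroring ggml_backend_sched_free above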
1878  ggml-kompute.cpp  (new file; diff suppressed because it is too large)

69  ggml-kompute.h  (new file)
@@ -0,0 +1,69 @@
+#pragma once
+
+#include "ggml-backend.h"
+
+#include <cstddef>
+#include <vector>
+#include <string>
+
+struct ggml_kompute_context;
+
+namespace vk {
+    class DeviceMemory;
+    class Buffer;
+};
+
+struct ggml_vk_memory {
+    void *data = nullptr;
+    size_t size = 0;
+    vk::DeviceMemory *primaryMemory = nullptr;
+    vk::Buffer *primaryBuffer = nullptr;
+    vk::DeviceMemory *stagingMemory = nullptr;
+    vk::Buffer *stagingBuffer = nullptr;
+};
+
+struct ggml_vk_device {
+    int index = 0;
+    int type = 0; // same as VkPhysicalDeviceType
+    size_t heapSize = 0;
+    std::string name;
+    std::string vendor;
+    int subgroupSize = 0;
+};
+
+std::vector<ggml_vk_device> ggml_vk_available_devices(size_t memoryRequired);
+bool ggml_vk_init_device(size_t memoryRequired, const std::string &device);
+bool ggml_vk_init_device(const ggml_vk_device &device);
+bool ggml_vk_init_device(int device);
+bool ggml_vk_free_device();
+bool ggml_vk_has_vulkan();
+bool ggml_vk_has_device();
+bool ggml_vk_using_vulkan();
+ggml_vk_device ggml_vk_current_device();
+struct ggml_kompute_context * ggml_vk_init(void);
+void ggml_vk_free(struct ggml_kompute_context * ctx);
+void ggml_vk_free_memory(ggml_vk_memory &memory);
+
+void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph * gf);
+
+//
+// backend API
+// user-code should use only these functions
+//
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// forward declaration
+typedef struct ggml_backend * ggml_backend_t;
+
+GGML_API ggml_backend_t ggml_backend_kompute_init(void);
+
+GGML_API bool ggml_backend_is_kompute(ggml_backend_t backend);
+
+GGML_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(void);
+
+#ifdef __cplusplus
+}
+#endif
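A hedged sketch of how the new ggml-kompute.h API above could be driven. The exact initialization order (selecting a Vulkan device before creating the backend) is an assumption on my part, not something this header enforces, and the device-selection policy here (first reported device) is purely illustrative.

    #include "ggml-kompute.h"
    #include <cstdio>

    static ggml_backend_t init_kompute_backend(size_t memory_required) {
        std::vector<ggml_vk_device> devices = ggml_vk_available_devices(memory_required);
        for (const ggml_vk_device & d : devices) {
            std::printf("Vulkan device %d: %s (%s), heap %zu MiB, subgroup %d\n",
                        d.index, d.name.c_str(), d.vendor.c_str(),
                        d.heapSize / (1024*1024), d.subgroupSize);
        }
        if (devices.empty() || !ggml_vk_init_device(devices.front())) {
            return nullptr; // no usable Vulkan device
        }
        return ggml_backend_kompute_init();
    }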
39  ggml-metal.m
@@ -2482,10 +2482,10 @@ static void ggml_backend_metal_free_device(void) {
     }
 }

-static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
+static const char * ggml_backend_metal_buffer_get_name(ggml_backend_buffer_t buffer) {
+    return "Metal";

-    return ctx->all_data;
+    UNUSED(buffer);
 }

 static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) {
@@ -2503,6 +2503,12 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer)
     free(ctx);
 }

+static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
+    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
+
+    return ctx->all_data;
+}
+
 static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
     memcpy((char *)tensor->data + offset, data, size);

@@ -2515,13 +2521,13 @@ static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, c
     UNUSED(buffer);
 }

-static void ggml_backend_metal_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
+static void ggml_backend_metal_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
     ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));

     UNUSED(buffer);
 }

-static void ggml_backend_metal_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
+static void ggml_backend_metal_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
     ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));

     UNUSED(buffer);
@@ -2534,6 +2540,7 @@ static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_
 }

 static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
+    /* .get_name        = */ ggml_backend_metal_buffer_get_name,
     /* .free_buffer     = */ ggml_backend_metal_buffer_free_buffer,
     /* .get_base        = */ ggml_backend_metal_buffer_get_base,
     /* .init_tensor     = */ NULL,
@@ -2542,10 +2549,17 @@ static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
     /* .cpy_tensor_from = */ ggml_backend_metal_buffer_cpy_tensor_from,
     /* .cpy_tensor_to   = */ ggml_backend_metal_buffer_cpy_tensor_to,
     /* .clear           = */ ggml_backend_metal_buffer_clear,
+    /* .reset           = */ NULL,
 };

 // default buffer type

+static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    return "Metal";
+
+    UNUSED(buft);
+}
+
 static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
     struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));

@@ -2618,6 +2632,7 @@ static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t bu
 ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) {
     static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
         /* .iface = */ {
+            /* .get_name       = */ ggml_backend_metal_buffer_type_get_name,
             /* .alloc_buffer   = */ ggml_backend_metal_buffer_type_alloc_buffer,
             /* .get_alignment  = */ ggml_backend_metal_buffer_type_get_alignment,
             /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
@@ -2641,6 +2656,14 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz
     ctx->n_buffers = 0;

     const size_t size_page = sysconf(_SC_PAGESIZE);

+    // page-align the data ptr
+    {
+        const uintptr_t offs = (uintptr_t) data % size_page;
+        data  = (void *) ((char *) data - offs);
+        size += offs;
+    }
+
     size_t size_aligned = size;
     if ((size_aligned % size_page) != 0) {
         size_aligned += (size_page - (size_aligned % size_page));
@@ -2741,7 +2764,7 @@ static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct
     UNUSED(backend);
 }

-static struct ggml_backend_i metal_backend_i = {
+static struct ggml_backend_i ggml_backend_metal_i = {
     /* .get_name                = */ ggml_backend_metal_name,
     /* .free                    = */ ggml_backend_metal_free,
     /* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type,
@@ -2767,7 +2790,7 @@ ggml_backend_t ggml_backend_metal_init(void) {
     ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend));

     *metal_backend = (struct ggml_backend) {
-        /* .interface = */ metal_backend_i,
+        /* .interface = */ ggml_backend_metal_i,
         /* .context   = */ ctx,
     };

@@ -2775,7 +2798,7 @@ ggml_backend_t ggml_backend_metal_init(void) {
 }

 bool ggml_backend_is_metal(ggml_backend_t backend) {
-    return backend->iface.get_name == ggml_backend_metal_name;
+    return backend && backend->iface.get_name == ggml_backend_metal_name;
 }

 void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
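Since ggml_backend_is_metal() now tolerates a NULL handle, callers can guard Metal-specific tuning without a separate NULL check; a minimal sketch, where `backend` and `n_threads` are placeholders:

    if (ggml_backend_is_metal(backend)) {          // safe even when backend == NULL
        ggml_backend_metal_set_n_cb(backend, n_threads);
    }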
335  ggml-opencl.cpp
@@ -1,5 +1,6 @@
 #include "ggml.h"
 #include "ggml-opencl.h"
+#include "ggml-backend-impl.h"

 #include <array>
 #include <atomic>
@@ -10,7 +11,7 @@
 #include <sstream>
 #include <vector>

-#define CL_TARGET_OPENCL_VERSION 110
+#define CL_TARGET_OPENCL_VERSION 120
 #include <clblast.h>

 #if defined(_MSC_VER)
@@ -929,6 +930,11 @@ static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, co
 }

 void ggml_cl_init(void) {
+    static bool initialized = false;
+    if (initialized) {
+        return;
+    }
+
     cl_int err;

     struct cl_device;
@@ -1483,8 +1489,8 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
     } else {
         d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size);
     }
-    cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
-    cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
+    cl_mem d_Y = src1->backend == GGML_BACKEND_GPU ? (cl_mem) src1->extra : ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
+    cl_mem d_D = dst->backend  == GGML_BACKEND_GPU ? (cl_mem)  dst->extra : ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);

     size_t x_offset = 0;

@@ -1501,7 +1507,9 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr

         for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
             // copy src1 to device
-            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
+            if (src1->backend == GGML_BACKEND_CPU) {
+                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
+            }

             CL_CHECK(clFinish(queue));

@@ -1522,8 +1530,10 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
             }

             // copy dst to host
-            float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
-            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
+            if (dst->backend == GGML_BACKEND_CPU) {
+                float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
+                CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
+            }
         }
     }
 }
@@ -1532,8 +1542,12 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
     if (src0->backend != GGML_BACKEND_GPU) {
         ggml_cl_pool_free(d_X, x_size);
     }
-    ggml_cl_pool_free(d_Y, y_size);
-    ggml_cl_pool_free(d_D, d_size);
+    if (src1->backend != GGML_BACKEND_GPU) {
+        ggml_cl_pool_free(d_Y, y_size);
+    }
+    if (dst->backend != GGML_BACKEND_GPU) {
+        ggml_cl_pool_free(d_D, d_size);
+    }
 }

 static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t wsize) {
@@ -1598,6 +1612,8 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
             CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
         }

+        // FIXME: convert on device
+
         for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
             // convert src1 to fp16
             // TODO: use multiple threads
@@ -1643,11 +1659,13 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
             }

             // copy dst to host, then convert to float
-            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));
-            float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
-            ggml_fp16_to_fp32_row(tmp, d, d_ne);
+            if (dst->backend == GGML_BACKEND_CPU) {
+                CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));
+                float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
+                ggml_fp16_to_fp32_row(tmp, d, d_ne);
+            } else {
+                // FIXME: convert dst to fp32 on device
+            }
         }
     }
 }
@@ -1801,7 +1819,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
 }


-bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
+bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst) {
     const int64_t ne10 = src1->ne[0];

     const int64_t ne0 = dst->ne[0];
@@ -1895,3 +1913,292 @@ void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) {
     tensor->extra = dst;
     GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
 }
+
+// ggml-backend
+
+// buffer
+
+struct ggml_backend_opencl_buffer_context {
+    ~ggml_backend_opencl_buffer_context() {
+        if (buffer) {
+            clReleaseMemObject(buffer);
+        }
+        for (auto * sub_buffer : sub_buffers) {
+            clReleaseMemObject(sub_buffer);
+        }
+    }
+
+    cl_mem buffer;
+    std::vector<cl_mem> sub_buffers;
+};
+
+static void * const cl_ptr_base = (void *)(uintptr_t) 0x1000;
+
+static const char * ggml_backend_opencl_buffer_get_name(ggml_backend_buffer_t buffer) {
+    return "OpenCL";
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+    ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+    delete ctx;
+}
+
+static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer) {
+    return cl_ptr_base;
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+    if (tensor->view_src != NULL && tensor->view_offs == 0) {
+        tensor->extra = tensor->view_src->extra;
+    } else {
+        ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+        cl_buffer_region region = {(size_t)((char *)tensor->data - (char *)cl_ptr_base), ggml_nbytes(tensor)};
+        cl_int err;
+        cl_mem sub_buffer = clCreateSubBuffer(ctx->buffer, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
+        CL_CHECK(err);
+        ctx->sub_buffers.push_back(sub_buffer);
+        tensor->extra = sub_buffer;
+    }
+    tensor->backend = GGML_BACKEND_GPU;
+}
+
+static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    cl_mem tensor_buffer = (cl_mem) tensor->extra;
+    CL_CHECK(clEnqueueWriteBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL));
+    CL_CHECK(clFinish(queue));
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    cl_mem tensor_buffer = (cl_mem) tensor->extra;
+    CL_CHECK(clEnqueueReadBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL));
+    CL_CHECK(clFinish(queue));
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+    ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+    CL_CHECK(clEnqueueFillBuffer(queue, ctx->buffer, &value, sizeof(value), 0, buffer->size, 0, NULL, NULL));
+    CL_CHECK(clFinish(queue));
+}
+
+static void ggml_backend_opencl_buffer_reset(ggml_backend_buffer_t buffer) {
+    ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+    for (auto * sub_buffer : ctx->sub_buffers) {
+        clReleaseMemObject(sub_buffer);
+    }
+    ctx->sub_buffers.clear();
+}
+
+static ggml_backend_buffer_i ggml_backend_opencl_buffer_interface = {
+    /* .get_name        = */ ggml_backend_opencl_buffer_get_name,
+    /* .free_buffer     = */ ggml_backend_opencl_buffer_free_buffer,
+    /* .get_base        = */ ggml_backend_opencl_buffer_get_base,
+    /* .init_tensor     = */ ggml_backend_opencl_buffer_init_tensor,
+    /* .set_tensor      = */ ggml_backend_opencl_buffer_set_tensor,
+    /* .get_tensor      = */ ggml_backend_opencl_buffer_get_tensor,
+    /* .cpy_tensor_from = */ NULL,
+    /* .cpy_tensor_to   = */ NULL,
+    /* .clear           = */ ggml_backend_opencl_buffer_clear,
+    /* .reset           = */ ggml_backend_opencl_buffer_reset,
+};
+
+// buffer type
+
+static const char * ggml_backend_opencl_buffer_type_name(ggml_backend_buffer_type_t buffer_type) {
+    return "OpenCL";
+
+    GGML_UNUSED(buffer_type);
+}
+
+static ggml_backend_buffer_t ggml_backend_opencl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buffer_type, size_t size) {
+    ggml_cl_init();
+
+    cl_int err;
+    cl_mem mem = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
+    if (err != CL_SUCCESS) {
+        fprintf(stderr, "%s: failed to allocate %.2f MiB\n", __func__, size / 1024.0 / 1024.0);
+        return nullptr;
+    }
+
+    ggml_backend_opencl_buffer_context * ctx = new ggml_backend_opencl_buffer_context{mem, {}};
+
+    return ggml_backend_buffer_init(buffer_type, ggml_backend_opencl_buffer_interface, ctx, size);
+}
+
+static size_t ggml_backend_opencl_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) {
+    // FIXME: not thread safe, device may not be initialized yet
+    static cl_uint alignment = -1;
+    if (alignment == (cl_uint)-1) {
+        ggml_cl_init();
+        clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN, sizeof(cl_uint), &alignment, NULL);
+    }
+    return alignment;
+
+    GGML_UNUSED(buffer_type);
+}
+
+static bool ggml_backend_opencl_buffer_type_supports_backend(ggml_backend_buffer_type_t buffer_type, ggml_backend_t backend) {
|
||||||
|
//return ggml_backend_is_opencl(backend); // opencl must be used through the cpu backend
|
||||||
|
return ggml_backend_is_cpu(backend);
|
||||||
|
|
||||||
|
GGML_UNUSED(buffer_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ggml_backend_buffer_type_i ggml_backend_opencl_buffer_type_interface = {
|
||||||
|
/* .get_name = */ ggml_backend_opencl_buffer_type_name,
|
||||||
|
/* .alloc_buffer = */ ggml_backend_opencl_buffer_type_alloc_buffer,
|
||||||
|
/* .get_alignment = */ ggml_backend_opencl_buffer_type_get_alignment,
|
||||||
|
/* .get_alloc_size = */ NULL,
|
||||||
|
/* .supports_backend = */ ggml_backend_opencl_buffer_type_supports_backend,
|
||||||
|
/* .is_host = */ NULL,
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type() {
|
||||||
|
static ggml_backend_buffer_type buffer_type = {
|
||||||
|
/* .iface = */ ggml_backend_opencl_buffer_type_interface,
|
||||||
|
/* .context = */ nullptr,
|
||||||
|
};
|
||||||
|
|
||||||
|
return &buffer_type;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
// host buffer type
|
||||||
|
|
||||||
|
static const char * ggml_backend_opencl_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
|
||||||
|
return "CL_Host";
|
||||||
|
|
||||||
|
GGML_UNUSED(buft);
|
||||||
|
}
|
||||||
|
|
||||||
|
static const char * ggml_backend_opencl_host_buffer_name(ggml_backend_buffer_t buffer) {
|
||||||
|
return "CL_Host";
|
||||||
|
|
||||||
|
GGML_UNUSED(buffer);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void ggml_backend_opencl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
|
||||||
|
ggml_cl_host_free(buffer->context);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ggml_backend_buffer_t ggml_backend_opencl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
|
||||||
|
void * ptr = ggml_cl_host_malloc(size);
|
||||||
|
|
||||||
|
if (ptr == nullptr) {
|
||||||
|
// fallback to cpu buffer
|
||||||
|
return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
|
||||||
|
buffer->buft = buft;
|
||||||
|
buffer->iface.get_name = ggml_backend_opencl_host_buffer_name;
|
||||||
|
buffer->iface.free_buffer = ggml_backend_opencl_host_buffer_free_buffer;
|
||||||
|
|
||||||
|
return buffer;
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type() {
|
||||||
|
static struct ggml_backend_buffer_type ggml_backend_opencl_buffer_type_host = {
|
||||||
|
/* .iface = */ {
|
||||||
|
/* .get_name = */ ggml_backend_opencl_host_buffer_type_name,
|
||||||
|
/* .alloc_buffer = */ ggml_backend_opencl_host_buffer_type_alloc_buffer,
|
||||||
|
/* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
|
||||||
|
/* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
|
||||||
|
/* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend,
|
||||||
|
/* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
|
||||||
|
},
|
||||||
|
/* .context = */ nullptr,
|
||||||
|
};
|
||||||
|
|
||||||
|
return &ggml_backend_opencl_buffer_type_host;
|
||||||
|
}
|
||||||
|
|
||||||
|
// backend
|
||||||
|
|
||||||
|
static const char * ggml_backend_opencl_name(ggml_backend_t backend) {
|
||||||
|
return "OpenCL";
|
||||||
|
|
||||||
|
GGML_UNUSED(backend);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void ggml_backend_opencl_free(ggml_backend_t backend) {
|
||||||
|
GGML_UNUSED(backend);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ggml_backend_buffer_type_t ggml_backend_opencl_get_default_buffer_type(ggml_backend_t backend) {
|
||||||
|
return ggml_backend_opencl_buffer_type();
|
||||||
|
|
||||||
|
GGML_UNUSED(backend);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) {
|
||||||
|
for (int i = 0; i < graph->n_nodes; ++i) {
|
||||||
|
ggml_tensor * node = graph->nodes[i];
|
||||||
|
switch (node->op) {
|
||||||
|
case GGML_OP_MUL_MAT:
|
||||||
|
ggml_cl_mul_mat(node->src[0], node->src[1], node, nullptr, 0);
|
||||||
|
break;
|
||||||
|
case GGML_OP_MUL:
|
||||||
|
ggml_cl_mul(node->src[0], node->src[1], node);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
|
||||||
|
GGML_UNUSED(backend);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool ggml_backend_opencl_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
|
||||||
|
switch (op->op) {
|
||||||
|
case GGML_OP_MUL_MAT:
|
||||||
|
return ggml_cl_can_mul_mat(op->src[0], op->src[1], op);
|
||||||
|
case GGML_OP_MUL:
|
||||||
|
// return ggml_can_repeat_rows(op->src[1], op->src[0]);
|
||||||
|
return true;
|
||||||
|
default:
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
GGML_UNUSED(backend);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ggml_backend_i opencl_backend_i = {
|
||||||
|
/* .get_name = */ ggml_backend_opencl_name,
|
||||||
|
/* .free = */ ggml_backend_opencl_free,
|
||||||
|
/* .get_default_buffer_type = */ ggml_backend_opencl_get_default_buffer_type,
|
||||||
|
/* .set_tensor_async = */ NULL,
|
||||||
|
/* .get_tensor_async = */ NULL,
|
||||||
|
/* .cpy_tensor_from_async = */ NULL,
|
||||||
|
/* .cpy_tensor_to_async = */ NULL,
|
||||||
|
/* .synchronize = */ NULL,
|
||||||
|
/* .graph_plan_create = */ NULL,
|
||||||
|
/* .graph_plan_free = */ NULL,
|
||||||
|
/* .graph_plan_compute = */ NULL,
|
||||||
|
/* .graph_compute = */ ggml_backend_opencl_graph_compute,
|
||||||
|
/* .supports_op = */ ggml_backend_opencl_supports_op,
|
||||||
|
};
|
||||||
|
|
||||||
|
ggml_backend_t ggml_backend_opencl_init() {
|
||||||
|
ggml_backend_t backend = new ggml_backend {
|
||||||
|
/* .interface = */ opencl_backend_i,
|
||||||
|
/* .context = */ nullptr
|
||||||
|
};
|
||||||
|
|
||||||
|
return backend;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ggml_backend_is_opencl(ggml_backend_t backend) {
|
||||||
|
return backend && backend->iface.get_name == ggml_backend_opencl_name;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
|
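The buffer-type code above reports the device's base-address alignment so that an allocator can place tensors inside the single cl_mem at offsets that clCreateSubBuffer will accept. A minimal sketch of the rounding such an allocator is expected to perform (not part of the diff; ggml_opencl_round_up_offset is a hypothetical helper name, and a power-of-two alignment is assumed):

static size_t ggml_opencl_round_up_offset(size_t offset, size_t alignment) {
    // round offset up to the next multiple of alignment (alignment must be a power of two)
    return (offset + alignment - 1) & ~(alignment - 1);
}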
ggml-opencl.h

@@ -1,6 +1,7 @@
#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {

@@ -9,17 +10,26 @@ extern "C" {
GGML_API void ggml_cl_init(void);

GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst);
GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

-GGML_API void * ggml_cl_host_malloc(size_t size);
-GGML_API void ggml_cl_host_free(void * ptr);
+// GGML_API void * ggml_cl_host_malloc(size_t size);
+// GGML_API void ggml_cl_host_free(void * ptr);

GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor);

GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);

// backend API

// GGML_API ggml_backend_t ggml_backend_opencl_init(void);

// GGML_API bool ggml_backend_is_opencl(ggml_backend_t backend);

GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
// GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);

#ifdef __cplusplus
}
#endif
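As a host-side sketch (not part of the diff), the newly exported buffer type can be handed to the generic ggml-backend helpers to obtain device memory; ggml_backend_buft_alloc_buffer is the same helper the fallback path in ggml-opencl.cpp already uses, and ggml_backend_buffer_free releases the result:

#include "ggml-backend.h"
#include "ggml-opencl.h"

// allocate nbytes of OpenCL device memory through the exported buffer type
static ggml_backend_buffer_t alloc_cl_buffer(size_t nbytes) {
    ggml_backend_buffer_type_t buft = ggml_backend_opencl_buffer_type();
    return ggml_backend_buft_alloc_buffer(buft, nbytes); // clCreateBuffer happens inside the backend
}
// ...later: ggml_backend_buffer_free(buf);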
30 ggml.c

@@ -2336,6 +2336,10 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
}

void ggml_free(struct ggml_context * ctx) {
    if (ctx == NULL) {
        return;
    }

    // make this function thread safe
    ggml_critical_section_start();

@@ -4351,6 +4355,23 @@ struct ggml_tensor * ggml_cpy_inplace(
    return ggml_cpy_impl(ctx, a, b, true);
}

struct ggml_tensor * ggml_cast(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_type type) {
    bool is_node = false;

    struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
    ggml_format_name(result, "%s (copy)", a->name);

    result->op = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = result;

    return result;
}

// ggml_cont

static struct ggml_tensor * ggml_cont_impl(

@@ -14851,7 +14872,7 @@ size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tenso
    return i;
}

-static struct ggml_hash_set ggml_hash_set_new(size_t size) {
+struct ggml_hash_set ggml_hash_set_new(size_t size) {
    size = ggml_hash_size(size);
    struct ggml_hash_set result;
    result.size = size;

@@ -16600,7 +16621,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
    return GGML_EXIT_SUCCESS;
}

-struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
+struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threads) {
    if (n_threads <= 0) {
        n_threads = GGML_DEFAULT_N_THREADS;
    }

@@ -16662,14 +16683,15 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
                } break;
            case GGML_OP_MUL_MAT_ID:
                {
                    cur = 0;
                    const struct ggml_tensor * src0 = node->src[2];
                    const struct ggml_tensor * src1 = node->src[1];
                    const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
                    if (src1->type != vec_dot_type) {
-                        cur = ggml_row_size(vec_dot_type, ggml_nelements(src1));
+                        cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
                    }
                    const int n_as = ggml_get_op_params_i32(node, 1);
-                    cur = GGML_PAD(cur, sizeof(int64_t)); // align
+                    cur += GGML_PAD(cur, sizeof(int64_t)); // align
                    cur += n_as * sizeof(int64_t); // matrix_row_counts
                    cur += n_as * src1->ne[1] * sizeof(int64_t); // matrix_rows
                } break;
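A small usage sketch for the new ggml_cast() defined above (not from the diff): it records a GGML_OP_CPY node whose destination is a freshly allocated tensor of the requested type, so the conversion happens when the graph is computed. The same hunk also makes ggml_free() a no-op on a NULL context.

#include "ggml.h"

void cast_example(void) {
    struct ggml_init_params params = {
        /* .mem_size   = */ 16*1024*1024,
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * x   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    struct ggml_tensor * x16 = ggml_cast(ctx, x, GGML_TYPE_F16); // becomes an f16 copy of x once the graph runs
    (void) x16;

    ggml_free(ctx);
    ggml_free(NULL); // now safe instead of dereferencing a NULL context
}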
9 ggml.h

@@ -1167,6 +1167,11 @@ extern "C" {
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cast(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum   ggml_type      type);

    // make contiguous
    GGML_API struct ggml_tensor * ggml_cont(
            struct ggml_context * ctx,

@@ -1849,8 +1854,8 @@ extern "C" {

    // ggml_graph_plan() has to be called before ggml_graph_compute()
    // when plan.work_size > 0, caller must allocate memory for plan.work_data
-    GGML_API struct ggml_cplan ggml_graph_plan   (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
-    GGML_API int               ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
+    GGML_API struct ggml_cplan ggml_graph_plan   (const struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
+    GGML_API int               ggml_graph_compute(      struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);

    // same as ggml_graph_compute() but the work data is allocated as a part of the context
    // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
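The two comments above describe the intended calling sequence. A minimal sketch of planning a graph and supplying the work buffer (not from the diff, error handling omitted):

#include <stdlib.h>
#include "ggml.h"

int compute_graph(struct ggml_cgraph * gf, int n_threads) {
    struct ggml_cplan plan = ggml_graph_plan(gf, n_threads); // the graph argument may now be const-qualified
    if (plan.work_size > 0) {
        plan.work_data = malloc(plan.work_size); // caller owns the scratch buffer
    }
    int rc = ggml_graph_compute(gf, &plan);
    free(plan.work_data);
    return rc;
}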
1 kompute (new submodule)

@@ -0,0 +1 @@
Subproject commit 4565194ed7c32d1d2efa32ceab4d3c6cae006306
97
kompute-shaders/common.comp
Normal file
97
kompute-shaders/common.comp
Normal file
|
@ -0,0 +1,97 @@
|
||||||
|
#extension GL_EXT_shader_16bit_storage: require
|
||||||
|
#extension GL_EXT_shader_8bit_storage: require
|
||||||
|
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
|
||||||
|
#extension GL_EXT_shader_explicit_arithmetic_types_int8: require
|
||||||
|
#extension GL_EXT_shader_explicit_arithmetic_types_int16: require
|
||||||
|
#extension GL_EXT_control_flow_attributes: enable
|
||||||
|
#extension GL_KHR_shader_subgroup_arithmetic : require
|
||||||
|
#extension GL_EXT_debug_printf : enable
|
||||||
|
|
||||||
|
#define QK4_0 32
|
||||||
|
#define QK4_1 32
|
||||||
|
|
||||||
|
#define GELU_COEF_A 0.044715
|
||||||
|
#define SQRT_2_OVER_PI 0.79788456080286535587989211986876
|
||||||
|
#define TWOPI_F 6.283185307179586f
|
||||||
|
|
||||||
|
#define QK_K 256
|
||||||
|
|
||||||
|
#define u8BufToU16(buf, idx) (((uint16_t(buf[idx + 1]) << 8)) | buf[idx])
|
||||||
|
#define u8BufToFloat16(buf, idx) uint16BitsToHalf u8BufToU16(buf, idx)
|
||||||
|
#define u8BufToU32(buf, idx) (((uint32_t u8BufToU16(buf, idx + 2) << 8 | buf[idx + 1]) << 8) | buf[idx])
|
||||||
|
#define u8BufToFloat(buf, idx) uintBitsToFloat u8BufToU32(buf, idx)
|
||||||
|
|
||||||
|
#define sizeof_block_q4_0 0x12
|
||||||
|
struct block_q4_0 {
|
||||||
|
float16_t d;
|
||||||
|
uint8_t qs[QK4_0 / 2];
|
||||||
|
};
|
||||||
|
mat4 dequantize_q4_0(const block_q4_0 xb, uint il) {
|
||||||
|
const float d1 = il != 0 ? (xb.d / 16.f) : xb.d;
|
||||||
|
const float d2 = d1 / 256.f;
|
||||||
|
const float md = -8.f * xb.d;
|
||||||
|
const uint16_t mask0 = il != 0 ? uint16_t(0x00F0) : uint16_t(0x000F);
|
||||||
|
const uint16_t mask1 = mask0 << 8;
|
||||||
|
|
||||||
|
mat4 reg;
|
||||||
|
for (int i=0;i<8;i++) {
|
||||||
|
uint16_t b = (uint16_t(xb.qs[2 * i + 1]) << 8) | uint16_t(xb.qs[2 * i]);
|
||||||
|
reg[i/2][2*(i%2)+0] = d1 * (b & mask0) + md;
|
||||||
|
reg[i/2][2*(i%2)+1] = d2 * (b & mask1) + md;
|
||||||
|
}
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define sizeof_block_q4_1 0x14
|
||||||
|
struct block_q4_1 {
|
||||||
|
float16_t d;
|
||||||
|
float16_t m;
|
||||||
|
uint8_t qs[QK4_1 / 2];
|
||||||
|
};
|
||||||
|
mat4 dequantize_q4_1(const block_q4_1 xb, uint il) {
|
||||||
|
const float d1 = il != 0 ? (xb.d / 16.f) : xb.d;
|
||||||
|
const float d2 = d1 / 256.f;
|
||||||
|
const float m = xb.m;
|
||||||
|
const uint16_t mask0 = il != 0 ? uint16_t(0x00F0) : uint16_t(0x000F);
|
||||||
|
const uint16_t mask1 = mask0 << 8;
|
||||||
|
|
||||||
|
mat4 reg;
|
||||||
|
for (int i=0;i<8;i++) {
|
||||||
|
uint16_t b = (uint16_t(xb.qs[2 * i + 1]) << 8) | uint16_t(xb.qs[2 * i]);
|
||||||
|
reg[i/2][2*(i%2)+0] = ((b & mask0) * d1) + m;
|
||||||
|
reg[i/2][2*(i%2)+1] = ((b & mask1) * d2) + m;
|
||||||
|
}
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define sizeof_block_q6_k 210
|
||||||
|
struct block_q6_k {
|
||||||
|
uint8_t ql[QK_K/2]; // quants, lower 4 bits
|
||||||
|
uint8_t qh[QK_K/4]; // quants, upper 2 bits
|
||||||
|
int8_t scales[QK_K/16]; // scales, quantized with 8 bits
|
||||||
|
float16_t d; // super-block scale
|
||||||
|
};
|
||||||
|
mat4 dequantize_q6_k(const block_q6_k xb, uint il) {
|
||||||
|
const float16_t d_all = xb.d;
|
||||||
|
uint8_t ql[QK_K/2];
|
||||||
|
uint8_t qh[QK_K/4];
|
||||||
|
int8_t scales[QK_K/16];
|
||||||
|
|
||||||
|
const uint qlIndex = 64*(il/8) + 32*((il/2)&1) + 16*(il&1);
|
||||||
|
const uint qhIndex = 32*(il/8) + 16*(il&1);
|
||||||
|
float16_t sc = xb.scales[(il%2) + 2 * ((il/2))];
|
||||||
|
il = (il/2) & 3;
|
||||||
|
|
||||||
|
const uint16_t kmask1 = il>1 ? uint16_t(il>2 ? 192 : 48) : uint16_t(il>0 ? 12 : 3);
|
||||||
|
const uint16_t kmask2 = il>1 ? uint8_t(0xF0) : uint8_t(0x0F);
|
||||||
|
const float16_t coef = il>1 ? float16_t(1.f/16.f) : float16_t(1.f);
|
||||||
|
const float16_t ml = float16_t(d_all * sc * 32.f);
|
||||||
|
const float16_t dl = float16_t(d_all * sc * coef);
|
||||||
|
mat4 reg;
|
||||||
|
for (int i = 0; i < 16; ++i) {
|
||||||
|
const float16_t q = (il&1) != 0 ? ((ql[qlIndex + i] & kmask2) | ((qh[qhIndex + i] & kmask1) << 2))
|
||||||
|
: ((ql[qlIndex + i] & kmask2) | ((qh[qhIndex + i] & kmask1) << 4));
|
||||||
|
reg[i/4][i%4] = dl * q - ml;
|
||||||
|
}
|
||||||
|
return reg;
|
||||||
|
}
|
58
kompute-shaders/op_add.comp
Normal file
58
kompute-shaders/op_add.comp
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
layout(local_size_x = 1024) in;
|
||||||
|
|
||||||
|
layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; };
|
||||||
|
layout(binding = 1) buffer restrict readonly tensorInB { float inB[]; };
|
||||||
|
layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout(push_constant) uniform PushConstants {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int nb00;
|
||||||
|
int nb01;
|
||||||
|
int nb02;
|
||||||
|
int nb03;
|
||||||
|
int ne10;
|
||||||
|
int ne11;
|
||||||
|
int ne12;
|
||||||
|
int ne13;
|
||||||
|
int nb10;
|
||||||
|
int nb11;
|
||||||
|
int nb12;
|
||||||
|
int nb13;
|
||||||
|
int ne0;
|
||||||
|
int nb0;
|
||||||
|
int nb1;
|
||||||
|
int nb2;
|
||||||
|
int nb3;
|
||||||
|
//int offs; // TODO: needed for GGML_OP_ACC, see metal code
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
// general-purpose kernel for addition of two tensors
|
||||||
|
// pros: works for non-contiguous tensors, supports broadcast across dims 1, 2 and 3
|
||||||
|
// cons: not very efficient
|
||||||
|
void main() {
|
||||||
|
const uint i03 = gl_WorkGroupID.z;
|
||||||
|
const uint i02 = gl_WorkGroupID.y;
|
||||||
|
const uint i01 = gl_WorkGroupID.x;
|
||||||
|
|
||||||
|
const uint i13 = i03 % pcs.ne13;
|
||||||
|
const uint i12 = i02 % pcs.ne12;
|
||||||
|
const uint i11 = i01 % pcs.ne11;
|
||||||
|
|
||||||
|
int offs = 0; // TMP (see above)
|
||||||
|
|
||||||
|
uint src0_off = uint((i03*pcs.nb03 + i02*pcs.nb02 + i01*pcs.nb01 + offs) / 4);
|
||||||
|
uint src1_off = uint((i13*pcs.nb13 + i12*pcs.nb12 + i11*pcs.nb11 ) / 4);
|
||||||
|
uint dst_off = uint((i03*pcs.nb3 + i02*pcs.nb2 + i01*pcs.nb1 + offs) / 4);
|
||||||
|
|
||||||
|
for (uint i0 = gl_LocalInvocationID.x; i0 < pcs.ne0; i0 += gl_WorkGroupSize.x) {
|
||||||
|
const uint i10 = i0 % pcs.ne10;
|
||||||
|
out_[pcs.outOff + dst_off + i0] = inA[pcs.inAOff + src0_off + i0] + inB[pcs.inBOff + src1_off + i10];
|
||||||
|
}
|
||||||
|
}
|
25
kompute-shaders/op_addrow.comp
Normal file
25
kompute-shaders/op_addrow.comp
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; };
|
||||||
|
layout(binding = 1) buffer restrict readonly tensorInB { float inB[]; };
|
||||||
|
layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout(push_constant) uniform PushConstants {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
uint row;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint baseIndex = gl_WorkGroupID.x * 4;
|
||||||
|
|
||||||
|
for (uint x = 0; x < 4; x++) {
|
||||||
|
const uint i = baseIndex + x;
|
||||||
|
out_[i + pcs.outOff] = inA[i + pcs.inAOff] + inB[(i % pcs.row) + pcs.inBOff];
|
||||||
|
}
|
||||||
|
}
|
52
kompute-shaders/op_cpy_f16_f16.comp
Normal file
52
kompute-shaders/op_cpy_f16_f16.comp
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define IN_TYPE float16_t
|
||||||
|
#define IN_TYPE_SIZE 2
|
||||||
|
#define OUT_TYPE float16_t
|
||||||
|
#define OUT_TYPE_SIZE 2
|
||||||
|
|
||||||
|
layout(local_size_x = 1024) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorIn { IN_TYPE in_[]; };
|
||||||
|
layout (binding = 1) writeonly buffer tensorOut { OUT_TYPE out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne01;
|
||||||
|
int ne02;
|
||||||
|
uint nb00;
|
||||||
|
uint nb01;
|
||||||
|
uint nb02;
|
||||||
|
uint nb03;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
int ne2;
|
||||||
|
uint nb0;
|
||||||
|
uint nb1;
|
||||||
|
uint nb2;
|
||||||
|
uint nb3;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint i03 = gl_WorkGroupID.z;
|
||||||
|
const uint i02 = gl_WorkGroupID.y;
|
||||||
|
const uint i01 = gl_WorkGroupID.x;
|
||||||
|
|
||||||
|
const int n = int(i03)*pcs.ne02*pcs.ne01*pcs.ne00 + int(i02)*pcs.ne01*pcs.ne00 + int(i01)*pcs.ne00;
|
||||||
|
|
||||||
|
const int i3 = n / (pcs.ne2*pcs.ne1*pcs.ne0);
|
||||||
|
const int i2 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0) / (pcs.ne1*pcs.ne0);
|
||||||
|
const int i1 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0) / pcs.ne0;
|
||||||
|
const int i0 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0 - i1*pcs.ne0);
|
||||||
|
|
||||||
|
const uint dst_data = (i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / OUT_TYPE_SIZE + pcs.outOff; // Based from out_
|
||||||
|
|
||||||
|
for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
|
||||||
|
const uint src = uint((i03*pcs.nb03 + i02*pcs.nb02 + i01*pcs.nb01 + i00*pcs.nb00) / IN_TYPE_SIZE) + pcs.inOff; // Based from in_
|
||||||
|
out_[dst_data+i00] = OUT_TYPE(in_[src]);
|
||||||
|
}
|
||||||
|
}
|
52
kompute-shaders/op_cpy_f16_f32.comp
Normal file
52
kompute-shaders/op_cpy_f16_f32.comp
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define IN_TYPE float16_t
|
||||||
|
#define IN_TYPE_SIZE 2
|
||||||
|
#define OUT_TYPE float
|
||||||
|
#define OUT_TYPE_SIZE 4
|
||||||
|
|
||||||
|
layout(local_size_x = 1024) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorIn { IN_TYPE in_[]; };
|
||||||
|
layout (binding = 1) writeonly buffer tensorOut { OUT_TYPE out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne01;
|
||||||
|
int ne02;
|
||||||
|
uint nb00;
|
||||||
|
uint nb01;
|
||||||
|
uint nb02;
|
||||||
|
uint nb03;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
int ne2;
|
||||||
|
uint nb0;
|
||||||
|
uint nb1;
|
||||||
|
uint nb2;
|
||||||
|
uint nb3;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint i03 = gl_WorkGroupID.z;
|
||||||
|
const uint i02 = gl_WorkGroupID.y;
|
||||||
|
const uint i01 = gl_WorkGroupID.x;
|
||||||
|
|
||||||
|
const int n = int(i03)*pcs.ne02*pcs.ne01*pcs.ne00 + int(i02)*pcs.ne01*pcs.ne00 + int(i01)*pcs.ne00;
|
||||||
|
|
||||||
|
const int i3 = n / (pcs.ne2*pcs.ne1*pcs.ne0);
|
||||||
|
const int i2 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0) / (pcs.ne1*pcs.ne0);
|
||||||
|
const int i1 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0) / pcs.ne0;
|
||||||
|
const int i0 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0 - i1*pcs.ne0);
|
||||||
|
|
||||||
|
const uint dst_data = (i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / OUT_TYPE_SIZE + pcs.outOff; // Based from out_
|
||||||
|
|
||||||
|
for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
|
||||||
|
const uint src = uint((i03*pcs.nb03 + i02*pcs.nb02 + i01*pcs.nb01 + i00*pcs.nb00) / IN_TYPE_SIZE) + pcs.inOff; // Based from in_
|
||||||
|
out_[dst_data+i00] = OUT_TYPE(in_[src]);
|
||||||
|
}
|
||||||
|
}
|
52
kompute-shaders/op_cpy_f32_f16.comp
Normal file
52
kompute-shaders/op_cpy_f32_f16.comp
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define IN_TYPE float
|
||||||
|
#define IN_TYPE_SIZE 4
|
||||||
|
#define OUT_TYPE float16_t
|
||||||
|
#define OUT_TYPE_SIZE 2
|
||||||
|
|
||||||
|
layout(local_size_x = 1024) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorIn { IN_TYPE in_[]; };
|
||||||
|
layout (binding = 1) writeonly buffer tensorOut { OUT_TYPE out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne01;
|
||||||
|
int ne02;
|
||||||
|
uint nb00;
|
||||||
|
uint nb01;
|
||||||
|
uint nb02;
|
||||||
|
uint nb03;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
int ne2;
|
||||||
|
uint nb0;
|
||||||
|
uint nb1;
|
||||||
|
uint nb2;
|
||||||
|
uint nb3;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint i03 = gl_WorkGroupID.z;
|
||||||
|
const uint i02 = gl_WorkGroupID.y;
|
||||||
|
const uint i01 = gl_WorkGroupID.x;
|
||||||
|
|
||||||
|
const int n = int(i03)*pcs.ne02*pcs.ne01*pcs.ne00 + int(i02)*pcs.ne01*pcs.ne00 + int(i01)*pcs.ne00;
|
||||||
|
|
||||||
|
const int i3 = n / (pcs.ne2*pcs.ne1*pcs.ne0);
|
||||||
|
const int i2 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0) / (pcs.ne1*pcs.ne0);
|
||||||
|
const int i1 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0) / pcs.ne0;
|
||||||
|
const int i0 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0 - i1*pcs.ne0);
|
||||||
|
|
||||||
|
const uint dst_data = (i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / OUT_TYPE_SIZE + pcs.outOff; // Based from out_
|
||||||
|
|
||||||
|
for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
|
||||||
|
const uint src = uint((i03*pcs.nb03 + i02*pcs.nb02 + i01*pcs.nb01 + i00*pcs.nb00) / IN_TYPE_SIZE) + pcs.inOff; // Based from in_
|
||||||
|
out_[dst_data+i00] = OUT_TYPE(in_[src]);
|
||||||
|
}
|
||||||
|
}
|
52
kompute-shaders/op_cpy_f32_f32.comp
Normal file
52
kompute-shaders/op_cpy_f32_f32.comp
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define IN_TYPE float
|
||||||
|
#define IN_TYPE_SIZE 4
|
||||||
|
#define OUT_TYPE float
|
||||||
|
#define OUT_TYPE_SIZE 4
|
||||||
|
|
||||||
|
layout(local_size_x = 1024) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorIn { IN_TYPE in_[]; };
|
||||||
|
layout (binding = 1) writeonly buffer tensorOut { OUT_TYPE out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne01;
|
||||||
|
int ne02;
|
||||||
|
uint nb00;
|
||||||
|
uint nb01;
|
||||||
|
uint nb02;
|
||||||
|
uint nb03;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
int ne2;
|
||||||
|
uint nb0;
|
||||||
|
uint nb1;
|
||||||
|
uint nb2;
|
||||||
|
uint nb3;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint i03 = gl_WorkGroupID.z;
|
||||||
|
const uint i02 = gl_WorkGroupID.y;
|
||||||
|
const uint i01 = gl_WorkGroupID.x;
|
||||||
|
|
||||||
|
const int n = int(i03)*pcs.ne02*pcs.ne01*pcs.ne00 + int(i02)*pcs.ne01*pcs.ne00 + int(i01)*pcs.ne00;
|
||||||
|
|
||||||
|
const int i3 = n / (pcs.ne2*pcs.ne1*pcs.ne0);
|
||||||
|
const int i2 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0) / (pcs.ne1*pcs.ne0);
|
||||||
|
const int i1 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0) / pcs.ne0;
|
||||||
|
const int i0 = (n - i3*pcs.ne2*pcs.ne1*pcs.ne0 - i2*pcs.ne1*pcs.ne0 - i1*pcs.ne0);
|
||||||
|
|
||||||
|
const uint dst_data = (i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / OUT_TYPE_SIZE + pcs.outOff; // Based from out_
|
||||||
|
|
||||||
|
for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
|
||||||
|
const uint src = uint((i03*pcs.nb03 + i02*pcs.nb02 + i01*pcs.nb01 + i00*pcs.nb00) / IN_TYPE_SIZE) + pcs.inOff; // Based from in_
|
||||||
|
out_[dst_data+i00] = OUT_TYPE(in_[src]);
|
||||||
|
}
|
||||||
|
}
|
30
kompute-shaders/op_diagmask.comp
Normal file
30
kompute-shaders/op_diagmask.comp
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
|
||||||
|
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout(push_constant) uniform PushConstants {
|
||||||
|
uint inOff;
|
||||||
|
uint outOff;
|
||||||
|
uint n_past;
|
||||||
|
int ne00;
|
||||||
|
int ne01;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint i02 = gl_WorkGroupID.z;
|
||||||
|
const uint i01 = gl_WorkGroupID.y;
|
||||||
|
const uint i00 = gl_WorkGroupID.x;
|
||||||
|
|
||||||
|
const uint index = i02*pcs.ne01*pcs.ne00 + i01*pcs.ne00 + i00;
|
||||||
|
|
||||||
|
if (i00 > pcs.n_past + i01) {
|
||||||
|
out_[index + pcs.outOff] = uintBitsToFloat(0xFF800000);
|
||||||
|
} else {
|
||||||
|
out_[index + pcs.outOff] = in_[index + pcs.inOff];
|
||||||
|
}
|
||||||
|
}
|
22
kompute-shaders/op_gelu.comp
Normal file
22
kompute-shaders/op_gelu.comp
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
|
||||||
|
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };
|
||||||
|
layout(push_constant) uniform PushConstants {
|
||||||
|
uint inOff;
|
||||||
|
uint outOff;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint baseIndex = gl_WorkGroupID.x * 8;
|
||||||
|
|
||||||
|
for (uint x = 0; x < 8; x++) {
|
||||||
|
const uint i = baseIndex + x;
|
||||||
|
const float y = in_[i + pcs.inOff];
|
||||||
|
out_[i + pcs.outOff] = 0.5*y*(1.0 + tanh(SQRT_2_OVER_PI*y*(1.0 + GELU_COEF_A*y*y)));
|
||||||
|
}
|
||||||
|
}
|
17
kompute-shaders/op_getrows.comp
Normal file
17
kompute-shaders/op_getrows.comp
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
void main() {
|
||||||
|
const uint i = gl_WorkGroupID.x;
|
||||||
|
const int r = inB[i + pcs.inBOff];
|
||||||
|
|
||||||
|
int z = 0;
|
||||||
|
for (uint ind = gl_LocalInvocationID.x; ind < pcs.ne00/16; ind += gl_WorkGroupSize.x) {
|
||||||
|
const uint inIndex = (r * pcs.nb01 + pcs.inAOff) + ind/NL * SIZE_OF_BLOCK;
|
||||||
|
const mat4 result = dequantize_block(inIndex, ind%NL);
|
||||||
|
for (uint j = 0; j < 4; ++j) {
|
||||||
|
for (uint k = 0; k < 4; ++k) {
|
||||||
|
const uint outIndex = i * pcs.nb1/BYTES_FOR_TYPE + pcs.outOff + z;
|
||||||
|
out_[outIndex] = result[j][k];
|
||||||
|
++z;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
31
kompute-shaders/op_getrows_f16.comp
Normal file
31
kompute-shaders/op_getrows_f16.comp
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { float16_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { int inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int nb01;
|
||||||
|
int nb1;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void dequantize_row_f16(uint x /*Based from inA unaligned*/, uint y /*Based from out_*/, int k) {
|
||||||
|
for (int j = 0; j < k; j++) {
|
||||||
|
out_[y + j] = inA[x + j];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint i = gl_WorkGroupID.x;
|
||||||
|
const int r = inB[i + pcs.inBOff];
|
||||||
|
|
||||||
|
dequantize_row_f16(r*pcs.nb01/2/*bytes for float16*/ + pcs.inAOff, i*pcs.nb1 + pcs.outOff, pcs.ne00);
|
||||||
|
}
|
38
kompute-shaders/op_getrows_q4_0.comp
Normal file
38
kompute-shaders/op_getrows_q4_0.comp
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define NL 2
|
||||||
|
#define BYTES_FOR_TYPE 4 /*bytes for float*/
|
||||||
|
#define SIZE_OF_BLOCK sizeof_block_q4_0
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { int inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int nb01;
|
||||||
|
int nb1;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
block_q4_0 get_unaligned_block_q4_0(uint index) {
|
||||||
|
block_q4_0 fres;
|
||||||
|
fres.d = u8BufToFloat16(inA, index);
|
||||||
|
[[unroll]] for (uint it = 0; it != QK4_0 / 2; it++) {
|
||||||
|
fres.qs[it] = inA[index+2+it];
|
||||||
|
}
|
||||||
|
return fres;
|
||||||
|
}
|
||||||
|
|
||||||
|
mat4 dequantize_block(uint index, uint il) {
|
||||||
|
const block_q4_0 block = get_unaligned_block_q4_0(index);
|
||||||
|
return dequantize_q4_0(block, il);
|
||||||
|
}
|
||||||
|
|
||||||
|
#include "op_getrows.comp"
|
39
kompute-shaders/op_getrows_q4_1.comp
Normal file
39
kompute-shaders/op_getrows_q4_1.comp
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define NL 2
|
||||||
|
#define BYTES_FOR_TYPE 4 /*bytes for float*/
|
||||||
|
#define SIZE_OF_BLOCK sizeof_block_q4_1
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { int inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int nb01;
|
||||||
|
int nb1;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
block_q4_1 get_unaligned_block_q4_1(uint index) {
|
||||||
|
block_q4_1 fres;
|
||||||
|
fres.d = u8BufToFloat16(inA, index);
|
||||||
|
fres.m = u8BufToFloat16(inA, index+2);
|
||||||
|
[[unroll]] for (uint it = 0; it != QK4_1 / 2; it++) {
|
||||||
|
fres.qs[it] = inA[index+4+it];
|
||||||
|
}
|
||||||
|
return fres;
|
||||||
|
}
|
||||||
|
|
||||||
|
mat4 dequantize_block(uint index, uint il) {
|
||||||
|
const block_q4_1 block = get_unaligned_block_q4_1(index);
|
||||||
|
return dequantize_q4_1(block, il);
|
||||||
|
}
|
||||||
|
|
||||||
|
#include "op_getrows.comp"
|
44
kompute-shaders/op_getrows_q6_k.comp
Normal file
44
kompute-shaders/op_getrows_q6_k.comp
Normal file
|
@ -0,0 +1,44 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define NL 16
|
||||||
|
#define BYTES_FOR_TYPE 4 /*bytes for float*/
|
||||||
|
#define SIZE_OF_BLOCK sizeof_block_q6_k
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { int inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int nb01;
|
||||||
|
int nb1;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
block_q6_k get_unaligned_block_q6_k(uint index) {
|
||||||
|
block_q6_k fres;
|
||||||
|
[[unroll]] for (uint it = 0; it != QK_K / 2; it++) {
|
||||||
|
fres.ql[it] = inA[index + it];
|
||||||
|
}
|
||||||
|
[[unroll]] for (uint it = 0; it != QK_K / 4; it++) {
|
||||||
|
fres.qh[it] = inA[index + QK_K/2 + it];
|
||||||
|
}
|
||||||
|
[[unroll]] for (uint it = 0; it != QK_K / 16; it++) {
|
||||||
|
fres.scales[it] = int8_t(inA[index + QK_K/2 + QK_K/4 + it]);
|
||||||
|
}
|
||||||
|
fres.d = u8BufToFloat16(inA, index + QK_K/2 + QK_K/4 + QK_K/16);
|
||||||
|
return fres;
|
||||||
|
}
|
||||||
|
|
||||||
|
mat4 dequantize_block(uint index, uint il) {
|
||||||
|
const block_q6_k block = get_unaligned_block_q6_k(index);
|
||||||
|
return dequantize_q6_k(block, il);
|
||||||
|
}
|
||||||
|
|
||||||
|
#include "op_getrows.comp"
|
24
kompute-shaders/op_mul.comp
Normal file
24
kompute-shaders/op_mul.comp
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
layout(local_size_x = 1) in;
|
||||||
|
|
||||||
|
layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; };
|
||||||
|
layout(binding = 1) buffer restrict readonly tensorInB { float inB[]; };
|
||||||
|
layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout(push_constant) uniform PushConstants {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint baseIndex = gl_WorkGroupID.x * 4;
|
||||||
|
|
||||||
|
for (uint x = 0; x < 4; x++) {
|
||||||
|
const uint i = baseIndex + x;
|
||||||
|
out_[i + pcs.outOff] = inA[i + pcs.inAOff] * inB[(i) + pcs.inBOff];
|
||||||
|
}
|
||||||
|
}
|
48
kompute-shaders/op_mul_mat_f16.comp
Normal file
48
kompute-shaders/op_mul_mat_f16.comp
Normal file
|
@ -0,0 +1,48 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#extension GL_KHR_shader_subgroup_arithmetic : require
|
||||||
|
|
||||||
|
layout(local_size_x_id = 0) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { float16_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
uint nb01;
|
||||||
|
uint nb02;
|
||||||
|
uint nb11;
|
||||||
|
uint nb12;
|
||||||
|
uint ne02;
|
||||||
|
uint ne12;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint r0 = gl_WorkGroupID.x;
|
||||||
|
const uint r1 = gl_WorkGroupID.y;
|
||||||
|
const uint im = gl_WorkGroupID.z;
|
||||||
|
|
||||||
|
uint bc_ab = pcs.ne12 > pcs.ne02 ? im / (pcs.ne12 / pcs.ne02) : im;
|
||||||
|
uint bc_ba = pcs.ne02 > pcs.ne12 ? im / (pcs.ne02 / pcs.ne12) : im;
|
||||||
|
|
||||||
|
const uint x = (r0*pcs.nb01 + bc_ab*pcs.nb02) / 2 + pcs.inAOff; // Based from inA
|
||||||
|
const uint y = (r1*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
|
||||||
|
|
||||||
|
float sumf = 0.0f;
|
||||||
|
for (uint i = gl_SubgroupInvocationID.x; i < pcs.ne00; i += gl_SubgroupSize) {
|
||||||
|
sumf += float(inA[x+i]) * float(inB[y+i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
const float all_sum = subgroupAdd(sumf);
|
||||||
|
if (subgroupElect()) {
|
||||||
|
out_[im*pcs.ne1*pcs.ne0 + r1*pcs.ne0 + r0 + pcs.outOff] = all_sum;
|
||||||
|
}
|
||||||
|
}
|
51
kompute-shaders/op_mul_mat_mat_f32.comp
Normal file
51
kompute-shaders/op_mul_mat_mat_f32.comp
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#extension GL_KHR_shader_subgroup_arithmetic : require
|
||||||
|
#extension GL_EXT_debug_printf : enable
|
||||||
|
|
||||||
|
// device subgroup size
|
||||||
|
layout (local_size_x_id = 0) in;
|
||||||
|
|
||||||
|
layout(binding = 0) readonly buffer tensorInA { float inA[]; };
|
||||||
|
layout(binding = 1) readonly buffer tensorInB { float inB[]; };
|
||||||
|
layout(binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout(push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne01;
|
||||||
|
int ne02;
|
||||||
|
int ne11;
|
||||||
|
int ne12;
|
||||||
|
uint nb01;
|
||||||
|
uint nb02;
|
||||||
|
uint nb11;
|
||||||
|
uint nb12;
|
||||||
|
uint nb1;
|
||||||
|
uint nb2;
|
||||||
|
}
|
||||||
|
pcs;
|
||||||
|
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
uvec3 gid = gl_WorkGroupID;
|
||||||
|
|
||||||
|
uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
|
||||||
|
uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;
|
||||||
|
|
||||||
|
const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) / 4 + pcs.inAOff; // Based from inA
|
||||||
|
const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
|
||||||
|
float sum = 0.0f;
|
||||||
|
for (uint i = gl_SubgroupInvocationID.x; i < pcs.ne00; i += gl_SubgroupSize) {
|
||||||
|
sum += float(inA[x+i]) * float(inB[y+i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
const float all_sum = subgroupAdd(sum);
|
||||||
|
if (subgroupElect()) {
|
||||||
|
out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = all_sum;
|
||||||
|
}
|
||||||
|
}
|
51
kompute-shaders/op_mul_mat_q4_0.comp
Normal file
51
kompute-shaders/op_mul_mat_q4_0.comp
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define BLOCKS_IN_QUANT QK4_0
|
||||||
|
#define SIZE_OF_BLOCK sizeof_block_q4_0
|
||||||
|
#define N_ROWS 4
|
||||||
|
|
||||||
|
layout(local_size_x_id = 0) in;
|
||||||
|
layout(local_size_y = 1) in;
|
||||||
|
layout(local_size_z = 1) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne10;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
int ne01;
|
||||||
|
int gqa;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
// The q4_0 version of this function
|
||||||
|
float block_q_n_dot_y(uint block_index, uint yb, uint il) {
|
||||||
|
vec2 acc = vec2(0.0, 0.0);
|
||||||
|
const uint index = (block_index) * SIZE_OF_BLOCK + pcs.inAOff;
|
||||||
|
float d = float(u8BufToFloat16(inA, index));
|
||||||
|
float sumy = 0.0f;
|
||||||
|
for (int i = 0; i < BLOCKS_IN_QUANT/4; i+=2) {
|
||||||
|
const uint16_t b = u8BufToU16(inA, index + 2 + il + i);
|
||||||
|
|
||||||
|
const float yl0 = inB[yb + i];
|
||||||
|
const float yl1 = inB[yb + i + 1];
|
||||||
|
const float yl8 = inB[yb + i + BLOCKS_IN_QUANT/2];
|
||||||
|
const float yl9 = inB[yb + i + BLOCKS_IN_QUANT/2 + 1];
|
||||||
|
|
||||||
|
sumy += yl0 + yl1 + yl8 + yl9;
|
||||||
|
|
||||||
|
acc[0] += yl0 * (b & 0x000F) + yl1 / 256.f * (b & 0x0F00);
|
||||||
|
acc[1] += yl8 / 16.f * (b & 0x00F0) + yl9 / 4096.f * (b & 0xF000);
|
||||||
|
}
|
||||||
|
return d * (sumy * -8.f + acc[0] + acc[1]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#include "op_mul_mv_q_n.comp"
|
53
kompute-shaders/op_mul_mat_q4_1.comp
Normal file
53
kompute-shaders/op_mul_mat_q4_1.comp
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define BLOCKS_IN_QUANT QK4_1
|
||||||
|
#define SIZE_OF_BLOCK sizeof_block_q4_1
|
||||||
|
#define N_ROWS 4
|
||||||
|
|
||||||
|
layout(local_size_x_id = 0) in;
|
||||||
|
layout(local_size_y = 1) in;
|
||||||
|
layout(local_size_z = 1) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne10;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
int ne01;
|
||||||
|
int gqa;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
// The q4_1 version of this function
|
||||||
|
float block_q_n_dot_y(uint block_index, uint yb, uint il) {
|
||||||
|
vec2 acc = vec2(0.0, 0.0);
|
||||||
|
const uint index = (block_index) * SIZE_OF_BLOCK + pcs.inAOff;
|
||||||
|
float d = float(u8BufToFloat16(inA, index));
|
||||||
|
float m = float(u8BufToFloat16(inA, index+2));
|
||||||
|
|
||||||
|
float sumy = 0.0f;
|
||||||
|
for (int i = 0; i < BLOCKS_IN_QUANT/4; i+=2) {
|
||||||
|
const uint16_t b = u8BufToU16(inA, index + 4 + il + i);
|
||||||
|
|
||||||
|
const float yl0 = inB[yb + i];
|
||||||
|
const float yl1 = inB[yb + i + 1];
|
||||||
|
const float yl8 = inB[yb + i + BLOCKS_IN_QUANT/2];
|
||||||
|
const float yl9 = inB[yb + i + BLOCKS_IN_QUANT/2 + 1];
|
||||||
|
|
||||||
|
sumy += yl0 + yl1 + yl8 + yl9;
|
||||||
|
|
||||||
|
acc[0] += yl0 * (b & 0x000F) + yl1 / 256.f * (b & 0x0F00);
|
||||||
|
acc[1] += yl8 / 16.f * (b & 0x00F0) + yl9 / 4096.f * (b & 0xF000);
|
||||||
|
}
|
||||||
|
return d * (acc[0] + acc[1]) + sumy * m;
|
||||||
|
}
|
||||||
|
|
||||||
|
#include "op_mul_mv_q_n.comp"
|
94
kompute-shaders/op_mul_mat_q6_k.comp
Normal file
94
kompute-shaders/op_mul_mat_q6_k.comp
Normal file
|
@ -0,0 +1,94 @@
|
||||||
|
#version 450
|
||||||
|
|
||||||
|
#include "common.comp"
|
||||||
|
|
||||||
|
#define SIZE_OF_BLOCK sizeof_block_q6_k
|
||||||
|
|
||||||
|
layout(local_size_x_id = 0) in;
|
||||||
|
layout(local_size_y_id = 1) in;
|
||||||
|
layout(local_size_z = 1) in;
|
||||||
|
|
||||||
|
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
|
||||||
|
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
|
||||||
|
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
|
||||||
|
|
||||||
|
layout (push_constant) uniform parameter {
|
||||||
|
uint inAOff;
|
||||||
|
uint inBOff;
|
||||||
|
uint outOff;
|
||||||
|
int ne00;
|
||||||
|
int ne10;
|
||||||
|
int ne0;
|
||||||
|
int ne1;
|
||||||
|
int ne01;
|
||||||
|
int gqa;
|
||||||
|
} pcs;
|
||||||
|
|
||||||
|
void main() {
|
||||||
|
const uint8_t kmask1 = uint8_t(0x03);
|
||||||
|
const uint8_t kmask2 = uint8_t(0x0C);
|
||||||
|
const uint8_t kmask3 = uint8_t(0x30);
|
||||||
|
const uint8_t kmask4 = uint8_t(0xC0);
|
||||||
|
|
||||||
|
const uint nb = pcs.ne00/QK_K;
|
||||||
|
|
||||||
|
const uint r0 = gl_WorkGroupID.x;
|
||||||
|
const uint r1 = gl_WorkGroupID.y;
|
||||||
|
const uint r2 = gl_WorkGroupID.z;
|
||||||
|
|
||||||
|
const uint row = (r0 * gl_NumSubgroups + gl_SubgroupID);
|
||||||
|
const uint offset0 = r2/pcs.gqa*(nb*pcs.ne0);
|
||||||
|
const uint x = row * nb + offset0; // Based from inA without base offset
|
||||||
|
const uint yy = r1*pcs.ne10 + r2*pcs.ne00*pcs.ne1+pcs.inBOff; // Based from inB
|
||||||
|
|
||||||
|
float sumf = 0;
|
||||||
|
|
||||||
|
// bits of invocation ID for gl_SubgroupSize=32:
|
||||||
|
// x x x x x
|
||||||
|
// 4 3 2 1 0
|
||||||
|
// ( tid ) ix
|
||||||
|
// ip ( il )
|
||||||
|
|
||||||
|
const uint block_stride = gl_SubgroupSize / 16; // number of blocks each subgroup processes
|
||||||
|
const uint tid = gl_SubgroupInvocationID/block_stride; // first block_stride groups have tid=0
|
||||||
|
const uint ix = gl_SubgroupInvocationID%block_stride; // first block is 0..block_stride-1
|
||||||
|
const uint ip = tid/8; // first or second half of block (0 or 1)
|
||||||
|
const uint il = tid%8; // each half has 8 parts, one per scale
|
||||||
|
const uint n = 4; // 4 scales at a time (and 4 sums)
|
||||||
|
const uint l0 = n*il; // offset into half-block, 0..28
|
||||||
|
const uint is = 8*ip + l0/16; // 0, 1, 8, 9
|
||||||
|
|
||||||
|
const uint y_offset = 128*ip + l0;
|
||||||
|
const uint q_offset_l = 64*ip + l0;
|
||||||
|
const uint q_offset_h = 32*ip + l0;
|
||||||
|
|
||||||
|
for (uint i = ix; i < nb; i += block_stride) {
|
||||||
|
|
||||||
|
const uint baseIndex = (x + i) * SIZE_OF_BLOCK + pcs.inAOff;
|
||||||
|
|
||||||
|
const uint qlIndex = q_offset_l;
|
||||||
|
const uint q2Index = qlIndex + QK_K/8;
|
||||||
|
const uint qhIndex = q_offset_h;
|
||||||
|
const uint y = yy + i * QK_K + y_offset;
|
||||||
|
|
||||||
|
float sums[4] = {0.0f, 0.0f, 0.0f, 0.0f};
|
||||||
|
for (uint l = 0; l < n; ++l) {
|
||||||
|
const uint8_t currentQ1 = inA[baseIndex + qlIndex + l];
|
||||||
|
const uint8_t currentQ2 = inA[baseIndex + q2Index + l];
|
||||||
|
const uint8_t currentQh = inA[baseIndex + QK_K/2 + qhIndex + l];
|
||||||
|
|
||||||
|
sums[0] += inB[y+l+ 0] * (int8_t((currentQ1 & 0xF) | ((currentQh & kmask1) << 4)) - 32);
|
||||||
|
sums[1] += inB[y+l+32] * (int8_t((currentQ2 & 0xF) | ((currentQh & kmask2) << 2)) - 32);
|
||||||
|
sums[2] += inB[y+l+64] * (int8_t((currentQ1 >> 4) | ((currentQh & kmask3) << 0)) - 32);
|
||||||
|
sums[3] += inB[y+l+96] * (int8_t((currentQ2 >> 4) | ((currentQh & kmask4) >> 2)) - 32);
|
||||||
|
}
|
||||||
|
|
||||||
|
float d = u8BufToFloat16(inA, baseIndex + QK_K/2 + QK_K/4 + QK_K/16);
|
||||||
|
sumf += d * (sums[0] * int8_t(inA[baseIndex + QK_K/2 + QK_K/4 + is]) + sums[1] * int8_t(inA[baseIndex + QK_K/2 + QK_K/4 + 2 + is]) + sums[2] * int8_t(inA[baseIndex + QK_K/2 + QK_K/4 + 4 + is]) + sums[3] * int8_t(inA[baseIndex + QK_K/2 + QK_K/4 + 6 + is]));
|
||||||
|
}
|
||||||
|
|
||||||
|
const float tot = subgroupAdd(sumf);
|
||||||
|
if (subgroupElect()) {
|
||||||
|
out_[r1*pcs.ne0 + r2*pcs.ne0*pcs.ne1 + row + pcs.outOff] = tot;
|
||||||
|
}
|
||||||
|
}
|
56
kompute-shaders/op_mul_mat_q8_0.comp
Normal file
56
kompute-shaders/op_mul_mat_q8_0.comp
Normal file
|
@ -0,0 +1,56 @@
#version 450

#include "common.comp"

#define BLOCKS_IN_QUANT QK8_0
#define SIZE_OF_BLOCK sizeof_block_q8_0
#define N_ROWS 4

layout(local_size_x_id = 0) in;
layout(local_size_y = 1) in;
layout(local_size_z = 1) in;

layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };

layout (push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne10;
    int ne0;
    int ne1;
    int ne01;
    int gqa;
} pcs;

#define ELS_PER_BLOCK 32
#define SIZE_OF_D 2
#define BLOCK_SIZE (ELS_PER_BLOCK + SIZE_OF_D)

void main() {
    const uint r0 = gl_WorkGroupID.x;
    const uint r1 = gl_WorkGroupID.y;
    const uint im = gl_WorkGroupID.z;

    const uint x = r0 * (pcs.ne00/ELS_PER_BLOCK) * BLOCK_SIZE + pcs.inAOff; // Based from inA
    const uint y = r1 * pcs.ne10 + pcs.inBOff; // based from inB

    float sumf = 0.0f;
    for (uint i = gl_SubgroupInvocationID.x; i < pcs.ne00; i += gl_SubgroupSize) {
        const uint block_number = i / ELS_PER_BLOCK;
        const uint block_offset = block_number * BLOCK_SIZE;
        const float d = u8BufToFloat16(inA, x + block_offset);
        const uint position_in_block = i % ELS_PER_BLOCK;
        const int q = int8_t(inA[x+block_offset+SIZE_OF_D+position_in_block]);
        const float dq = d * q;
        sumf += dq * float(inB[y+i]);
    }

    const float all_sum = subgroupAdd(sumf);
    if (subgroupElect()) {
        out_[im*pcs.ne1*pcs.ne0 + r1*pcs.ne0 + r0 + pcs.outOff] = all_sum;
    }
}
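As a cross-check, a small C++ reference of the Q8_0 dot product this shader parallelises; a sketch under the block layout visible above (a 2-byte fp16 scale followed by 32 int8 quants per block), with the scales assumed to be already decoded to float, and all names illustrative.

#include <cstdint>
#include <cstddef>

// Dot product of one Q8_0-quantized row with a float vector: dequantize each
// element as d[block] * q and accumulate, mirroring the per-lane shader loop.
float q8_0_dot(const float* d, const int8_t* q, const float* x, size_t n) {
    float sum = 0.0f;
    for (size_t i = 0; i < n; ++i) {
        const size_t block = i / 32;          // ELS_PER_BLOCK
        sum += d[block] * float(q[i]) * x[i]; // dequantize, then multiply-accumulate
    }
    return sum;
}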
41  kompute-shaders/op_mul_mv_q_n.comp  Normal file
@@ -0,0 +1,41 @@
void main() {
    if (gl_SubgroupInvocationID > 31)
        return;

    const uint nb = uint(pcs.ne00/BLOCKS_IN_QUANT);

    const uint r0 = gl_WorkGroupID.x;
    const uint r1 = gl_WorkGroupID.y;
    const uint im = gl_WorkGroupID.z;

    const uint first_row = (r0 * gl_NumSubgroups + gl_SubgroupID) * N_ROWS;
    const uint offset0 = first_row * nb + im/pcs.gqa*(nb*pcs.ne0);

    const uint x = offset0; // Based from inA without base offset
    const uint y = r1*uint(pcs.ne10)+im*pcs.ne00*pcs.ne1+pcs.inBOff; // Based from inB

    float sumf[N_ROWS] = {0.0f, 0.0f, 0.0f, 0.0f};

    const uint ix = gl_SubgroupInvocationID/2;
    const uint il = (BLOCKS_IN_QUANT/4)*(gl_SubgroupInvocationID%2);

    uint yb = y + ix * BLOCKS_IN_QUANT + il;

    //debugPrintfEXT("gl_NumSubgroups=%d, gl_SubgroupID=%d, gl_SubgroupInvocationID=%d, glSubgroupSize=%d, gl_WorkGroupSize.x=%d, gl_WorkGroupSize.y=%d, gl_WorkGroupSize.z=%d\n",
    //               gl_NumSubgroups, gl_SubgroupID, gl_SubgroupInvocationID, gl_SubgroupSize,
    //               gl_WorkGroupSize.x, gl_WorkGroupSize.y, gl_WorkGroupSize.z);

    for (uint ib = ix; ib < nb; ib += 16) {
        for (int row = 0; row < N_ROWS; row++) {
            const uint block_index = x + ib + row * nb;
            sumf[row] += block_q_n_dot_y(block_index, yb, il);
        }

        yb += BLOCKS_IN_QUANT * 16;
    }

    for (int row = 0; row < N_ROWS; ++row) {
        const float tot = subgroupAdd(sumf[row]);
        if (first_row + row < pcs.ne01 && subgroupElect()) {
            out_[r1*pcs.ne0 + im*pcs.ne0*pcs.ne1 + first_row + row + pcs.outOff] = tot;
        }
    }
}
25  kompute-shaders/op_mulrow.comp  Normal file
@@ -0,0 +1,25 @@
#version 450

#include "common.comp"

layout(local_size_x = 1) in;

layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; };
layout(binding = 1) buffer restrict readonly tensorInB { float inB[]; };
layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inAOff;
    uint inBOff;
    uint outOff;
    uint row;
} pcs;

void main() {
    const uint baseIndex = gl_WorkGroupID.x * 4;

    for (uint x = 0; x < 4; x++) {
        const uint i = baseIndex + x;
        out_[i + pcs.outOff] = inA[i + pcs.inAOff] * inB[(i % pcs.row) + pcs.inBOff];
    }
}
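In scalar terms, op_mulrow broadcasts a short row across the flat input, repeating it every pcs.row elements; a minimal C++ sketch (function and argument names are illustrative, not from the repository):

#include <cstddef>

// Multiply each element of a by the corresponding element of a shorter row b,
// repeating b every `row` elements -- the same indexing as inB[(i % pcs.row)].
void mul_row(float* out, const float* a, const float* b, size_t n, size_t row) {
    for (size_t i = 0; i < n; ++i) {
        out[i] = a[i] * b[i % row];
    }
}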
84  kompute-shaders/op_norm.comp  Normal file
@@ -0,0 +1,84 @@
#version 450

#include "common.comp"

layout(local_size_x = 256) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;
    uint outOff;
    uint ne00;
    uint nb01;
    float eps;
} pcs;

shared float sum[gl_WorkGroupSize.x];

void main() {
    const uint x = (gl_WorkGroupID.x*pcs.nb01/4) + pcs.inOff; // Based from in_
    // MEAN
    // parallel sum
    sum[gl_LocalInvocationID.x] = 0.0;
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
        sum[gl_LocalInvocationID.x] += in_[x+i00];
    }

    // reduce
    barrier();
    memoryBarrierShared();
    [[unroll]] for (uint i = gl_WorkGroupSize.x/2; i > 0; i /= 2) {
        if (gl_LocalInvocationID.x < i) {
            sum[gl_LocalInvocationID.x] += sum[gl_LocalInvocationID.x + i];
        }
        barrier();
        memoryBarrierShared();
    }

    // broadcast
    if (gl_LocalInvocationID.x == 0) {
        sum[0] /= float(pcs.ne00);
    }
    barrier();
    memoryBarrierShared();
    const float mean = sum[0];

    // recenter
    const uint y = (gl_WorkGroupID.x*pcs.ne00) + pcs.outOff; // Based from out_
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
        out_[y+i00] = in_[x+i00] - mean;
    }

    // VARIANCE
    // parallel sum
    sum[gl_LocalInvocationID.x] = 0.0;
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
        sum[gl_LocalInvocationID.x] += out_[y+i00] * out_[y+i00];
    }

    // reduce
    barrier();
    memoryBarrierShared();
    [[unroll]] for (uint i = gl_WorkGroupSize.x/2; i > 0; i /= 2) {
        if (gl_LocalInvocationID.x < i) {
            sum[gl_LocalInvocationID.x] += sum[gl_LocalInvocationID.x + i];
        }
        barrier();
        memoryBarrierShared();
    }

    // broadcast
    if (gl_LocalInvocationID.x == 0) {
        sum[0] /= float(pcs.ne00);
    }
    barrier();
    memoryBarrierShared();
    const float variance = sum[0];

    const float scale = 1.0f/sqrt(variance + pcs.eps);
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
        out_[y+i00] *= scale;
    }
}
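The workgroup above is simply a parallel form of standard layer normalisation; a scalar C++ reference for one row, as a sketch with illustrative names:

#include <cmath>
#include <cstddef>

// Normalize one row of length n: subtract the mean, then scale by
// 1/sqrt(variance + eps) -- the same MEAN/VARIANCE passes as the shader.
void norm_row(float* out, const float* in, size_t n, float eps) {
    float mean = 0.0f;
    for (size_t i = 0; i < n; ++i) mean += in[i];
    mean /= float(n);

    float var = 0.0f;
    for (size_t i = 0; i < n; ++i) {
        out[i] = in[i] - mean;   // recenter
        var += out[i] * out[i];
    }
    var /= float(n);

    const float scale = 1.0f / std::sqrt(var + eps);
    for (size_t i = 0; i < n; ++i) out[i] *= scale;
}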
21  kompute-shaders/op_relu.comp  Normal file
@@ -0,0 +1,21 @@
#version 450

#include "common.comp"

layout(local_size_x = 1) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };
layout(push_constant) uniform PushConstants {
    uint inOff;
    uint outOff;
} pcs;

void main() {
    const uint baseIndex = gl_WorkGroupID.x * 4;

    for (uint x = 0; x < 4; x++) {
        const uint i = baseIndex + x;
        out_[i + pcs.outOff] = max(0.0, in_[i + pcs.inOff]);
    }
}
53  kompute-shaders/op_rmsnorm.comp  Normal file
@@ -0,0 +1,53 @@
#version 450

#include "common.comp"

layout(local_size_x = 512) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;
    uint outOff;
    uint ne00;
    uint nb01;
    float eps;
} pcs;

shared float sum[gl_WorkGroupSize.x];

void main() {
    const uint x = (gl_WorkGroupID.x*pcs.nb01/4) + pcs.inOff; // Based from in_

    // parallel sum
    sum[gl_LocalInvocationID.x] = 0.0;
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
        sum[gl_LocalInvocationID.x] += in_[x+i00] * in_[x+i00];
    }

    // reduce
    barrier();
    memoryBarrierShared();
    [[unroll]] for (uint i = gl_WorkGroupSize.x/2; i > 0; i /= 2) {
        if (gl_LocalInvocationID.x < i) {
            sum[gl_LocalInvocationID.x] += sum[gl_LocalInvocationID.x + i];
        }
        barrier();
        memoryBarrierShared();
    }

    // broadcast
    if (gl_LocalInvocationID.x == 0) {
        sum[0] /= float(pcs.ne00);
    }
    barrier();
    memoryBarrierShared();

    const float scale = 1.0f/sqrt(sum[0] + pcs.eps);

    const uint y = (gl_WorkGroupID.x*pcs.ne00) + pcs.outOff; // Based from out_
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += gl_WorkGroupSize.x) {
        out_[y+i00] = in_[x+i00] * scale;
    }
}
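RMS norm differs from the layer norm shader above only in skipping the mean subtraction; a scalar sketch for one row (names illustrative):

#include <cmath>
#include <cstddef>

// Scale a row by 1/sqrt(mean(x^2) + eps) without recentering it.
void rms_norm_row(float* out, const float* in, size_t n, float eps) {
    float ss = 0.0f;
    for (size_t i = 0; i < n; ++i) ss += in[i] * in[i];
    const float scale = 1.0f / std::sqrt(ss / float(n) + eps);
    for (size_t i = 0; i < n; ++i) out[i] = in[i] * scale;
}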
73  kompute-shaders/op_rope_f16.comp  Normal file
@@ -0,0 +1,73 @@
#version 450

#include "rope_common.comp"

layout(binding = 0) buffer restrict readonly tensorInA { float16_t inA[]; };
layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; };
layout(binding = 2) buffer restrict writeonly tensorOut { float16_t out_[]; };

void main() {
    const uint i3 = gl_WorkGroupID.z;
    const uint i2 = gl_WorkGroupID.y;
    const uint i1 = gl_WorkGroupID.x;

    const bool is_neox = (pcs.mode & 2) != 0;

    float corr_dims[2];
    rope_yarn_corr_dims(pcs.n_dims, pcs.n_orig_ctx, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims);

    const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims);

    const int p = inB[pcs.inBOff + i2];

    float theta = float(p);

    if (!is_neox) {
        for (uint i0 = 0; i0 < pcs.ne0; i0 += 2) {
            float cos_theta, sin_theta;
            rope_yarn(theta, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta);

            theta *= theta_scale;

            const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in
            const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_

            const float x0 = float(inA[src]);
            const float x1 = float(inA[src+1]);

            out_[dst_data] = float16_t(x0*cos_theta - x1*sin_theta);
            out_[dst_data+1] = float16_t(x0*sin_theta + x1*cos_theta);
        }
    } else {
        const float inv_ndims = -1.f/pcs.n_dims;
        for (uint ic = 0; ic < pcs.n_dims; ic += 2) {
            const uint cur_rot = ic;

            float cos_theta, sin_theta;
            rope_yarn(theta, pcs.freq_scale, corr_dims, cur_rot, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta);

            theta *= theta_scale;

            const uint i0 = ic/2;

            const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in
            const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_

            const float x0 = float(inA[src]);
            const float x1 = float(inA[src+pcs.n_dims/2]);

            out_[dst_data] = float16_t(x0*cos_theta - x1*sin_theta);
            out_[dst_data+pcs.n_dims/2] = float16_t(x0*sin_theta + x1*cos_theta);
        }

        for (uint ic = pcs.n_dims; ic < pcs.ne0; ic += 2) {
            const uint i0 = ic;

            const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in
            const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_

            out_[dst_data + 0] = inA[src + 0];
            out_[dst_data + 1] = inA[src + 1];
        }
    }
}
73  kompute-shaders/op_rope_f32.comp  Normal file
@@ -0,0 +1,73 @@
#version 450

#include "rope_common.comp"

layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; };
layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; };
layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; };

void main() {
    const uint i3 = gl_WorkGroupID.z;
    const uint i2 = gl_WorkGroupID.y;
    const uint i1 = gl_WorkGroupID.x;

    const bool is_neox = (pcs.mode & 2) != 0;

    float corr_dims[2];
    rope_yarn_corr_dims(pcs.n_dims, pcs.n_orig_ctx, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims);

    const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims);

    const int p = inB[pcs.inBOff + i2];

    float theta = float(p);

    if (!is_neox) {
        for (uint i0 = 0; i0 < pcs.ne0; i0 += 2) {
            float cos_theta, sin_theta;
            rope_yarn(theta, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta);

            theta *= theta_scale;

            const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in
            const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_

            const float x0 = inA[src];
            const float x1 = inA[src+1];

            out_[dst_data] = x0*cos_theta - x1*sin_theta;
            out_[dst_data+1] = x0*sin_theta + x1*cos_theta;
        }
    } else {
        const float inv_ndims = -1.f/pcs.n_dims;
        for (uint ic = 0; ic < pcs.n_dims; ic += 2) {
            const uint cur_rot = ic;

            float cos_theta, sin_theta;
            rope_yarn(theta, pcs.freq_scale, corr_dims, cur_rot, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta);

            theta *= theta_scale;

            const uint i0 = ic/2;

            const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in
            const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_

            const float x0 = inA[src];
            const float x1 = inA[src+pcs.n_dims/2];

            out_[dst_data] = x0*cos_theta - x1*sin_theta;
            out_[dst_data+pcs.n_dims/2] = x0*sin_theta + x1*cos_theta;
        }

        for (uint ic = pcs.n_dims; ic < pcs.ne0; ic += 2) {
            const uint i0 = ic;

            const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in
            const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_

            out_[dst_data + 0] = inA[src + 0];
            out_[dst_data + 1] = inA[src + 1];
        }
    }
}
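Both RoPE variants apply the same 2-D rotation to each pair of values; a scalar C++ sketch of that core step, leaving out the surrounding YaRN angle scaling (names are illustrative):

#include <cmath>

// Rotate one (x0, x1) pair by angle theta, as the non-NeoX branch does for
// adjacent elements and the NeoX branch does for elements n_dims/2 apart.
void rope_rotate_pair(float x0, float x1, float theta, float& y0, float& y1) {
    const float c = std::cos(theta);
    const float s = std::sin(theta);
    y0 = x0 * c - x1 * s;
    y1 = x0 * s + x1 * c;
}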
19  kompute-shaders/op_scale.comp  Normal file
@@ -0,0 +1,19 @@
#version 450

#include "common.comp"

layout(local_size_x = 1) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;
    uint outOff;
    float scale;
} pcs;

void main() {
    const uint i = gl_WorkGroupID.x;
    out_[i + pcs.outOff] = in_[i + pcs.inOff] * pcs.scale;
}
23  kompute-shaders/op_scale_8.comp  Normal file
@@ -0,0 +1,23 @@
#version 450

#include "common.comp"

layout(local_size_x = 1) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;
    uint outOff;
    float scale;
} pcs;

void main() {
    const uint baseIndex = gl_WorkGroupID.x * 8;

    for (uint x = 0; x < 8; x++) {
        const uint i = baseIndex + x;
        out_[i + pcs.outOff] = in_[i + pcs.inOff] * pcs.scale;
    }
}
22  kompute-shaders/op_silu.comp  Normal file
@@ -0,0 +1,22 @@
#version 450

#include "common.comp"

layout(local_size_x = 1) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };
layout(push_constant) uniform PushConstants {
    uint inOff;
    uint outOff;
} pcs;

void main() {
    const uint baseIndex = gl_WorkGroupID.x * 4;

    for (uint x = 0; x < 4; x++) {
        const uint i = baseIndex + x;
        const float y = in_[i + pcs.inOff];
        out_[i + pcs.outOff] = y / (1.0 + exp(-y));
    }
}
51  kompute-shaders/op_softmax.comp  Normal file
@@ -0,0 +1,51 @@
// TODO: implement multi-simd softmax (llama.cpp commit e16b9fa4)

#version 450

#include "common.comp"

layout(local_size_x_id = 0) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;
    uint outOff;
    int ne00;
    int ne01;
    int ne02;
} pcs;

void main() {
    if (gl_SubgroupInvocationID > 31)
        return;

    const uint i03 = gl_WorkGroupID.z;
    const uint i02 = gl_WorkGroupID.y;
    const uint i01 = gl_WorkGroupID.x;

    const uint extra_off = i03*pcs.ne02*pcs.ne01*pcs.ne00 + i02*pcs.ne01*pcs.ne00 + i01*pcs.ne00;
    const uint psrc0 = extra_off + pcs.inOff; // Based from in_
    const uint pdst = extra_off + pcs.outOff; // Based from out_

    // parallel max
    float localMax = uintBitsToFloat(0xFF800000);
    for (uint i00 = gl_SubgroupInvocationID.x; i00 < pcs.ne00; i00 += 32) {
        localMax = max(localMax, in_[psrc0 + i00]);
    }
    float max_ = subgroupMax(localMax);

    // parallel sum
    float localSum = 0.0f;
    for (uint i00 = gl_SubgroupInvocationID.x; i00 < pcs.ne00; i00 += 32) {
        const float exp_psrc0 = exp(in_[psrc0 + i00] - max_);
        localSum += exp_psrc0;
        out_[pdst + i00] = exp_psrc0;
    }

    const float sum = subgroupAdd(localSum);
    for (uint i00 = gl_SubgroupInvocationID.x; i00 < pcs.ne00; i00 += 32) {
        out_[pdst + i00] /= sum;
    }
}
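The shader implements the usual numerically stable softmax, subtracting the row maximum before exponentiating; a scalar C++ reference for one row (names illustrative):

#include <algorithm>
#include <cmath>
#include <cstddef>

// Stable softmax over one row (n >= 1): shift by the max so exp() cannot
// overflow, then normalise by the sum -- the same three passes as the shader.
void softmax_row(float* out, const float* in, size_t n) {
    float max_ = in[0];
    for (size_t i = 1; i < n; ++i) max_ = std::max(max_, in[i]);

    float sum = 0.0f;
    for (size_t i = 0; i < n; ++i) {
        out[i] = std::exp(in[i] - max_);
        sum += out[i];
    }
    for (size_t i = 0; i < n; ++i) out[i] /= sum;
}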
67  kompute-shaders/rope_common.comp  Normal file
@@ -0,0 +1,67 @@
#include "common.comp"

// TODO: use a local size of 32 or more (Metal uses 1024)
layout(local_size_x = 1) in;

layout (push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int n_dims;
    int mode;
    int n_orig_ctx;
    float freq_base;
    float freq_scale;
    float ext_factor;
    float attn_factor;
    float beta_fast;
    float beta_slow;
    uint nb00;
    uint nb01;
    uint nb02;
    uint nb03;
    int ne0;
    uint nb0;
    uint nb1;
    uint nb2;
    uint nb3;
} pcs;

float rope_yarn_ramp(const float low, const float high, const float i0) {
    const float y = (i0 / 2 - low) / max(0.001f, high - low);
    return 1.0f - min(1.0f, max(0.0f, y));
}

// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
void rope_yarn(
    float theta_extrap, float freq_scale, float corr_dims[2], float i0, float ext_factor, float mscale,
    out float cos_theta, out float sin_theta
) {
    // Get n-d rotational scaling corrected for extrapolation
    float theta_interp = freq_scale * theta_extrap;
    float theta = theta_interp;
    if (ext_factor != 0.0f) {
        float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
        theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;

        // Get n-d magnitude scaling corrected for interpolation
        mscale *= 1.0f + 0.1f * log(1.0f / freq_scale);
    }
    cos_theta = cos(theta) * mscale;
    sin_theta = sin(theta) * mscale;
}

// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) {
    return n_dims * log(n_orig_ctx / (n_rot * TWOPI_F)) / (2 * log(base));
}

void rope_yarn_corr_dims(
    int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, out float dims[2]
) {
    // start and end correction dims
    dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base)));
    dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base)));
}
19  llama.h
@@ -45,7 +45,7 @@
 #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
 #define LLAMA_SESSION_VERSION 3

-#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
+#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) || defined(GGML_USE_KOMPUTE)
 // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
 #define LLAMA_SUPPORTS_GPU_OFFLOAD
 #endif
@@ -116,6 +116,12 @@ extern "C" {
         LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN,
     };

+    enum llama_split_mode {
+        LLAMA_SPLIT_NONE = 0,  // single GPU
+        LLAMA_SPLIT_LAYER = 1, // split layers and KV across GPUs
+        LLAMA_SPLIT_ROW = 2,   // split rows across GPUs
+    };
+
     typedef struct llama_token_data {
         llama_token id; // token id
         float logit;    // log-odds of the token
@@ -178,8 +184,15 @@ extern "C" {

     struct llama_model_params {
         int32_t n_gpu_layers; // number of layers to store in VRAM
-        int32_t main_gpu;     // the GPU that is used for scratch and small tensors
-        const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)
+        enum llama_split_mode split_mode; // how to split the model across multiple GPUs
+
+        // main_gpu interpretation depends on split_mode:
+        // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
+        // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
+        // LLAMA_SPLIT_LAYER: ignored
+        int32_t main_gpu;
+        // proportion of the model (layers or rows) to offload to each GPU, size: LLAMA_MAX_DEVICES
+        const float * tensor_split;

         // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
         // If the provided progress_callback returns true, model loading continues.
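For context, this is roughly how an application would fill in the new split_mode field; a hedged usage sketch that assumes the usual llama_model_default_params() helper from llama.h (not part of this diff):

#include "llama.h"

// Offload 32 layers, splitting them across GPUs; main_gpu is ignored for
// LLAMA_SPLIT_LAYER, per the comment added in the header above.
llama_model_params make_params() {
    llama_model_params mp = llama_model_default_params(); // assumed helper
    mp.n_gpu_layers = 32;
    mp.split_mode   = LLAMA_SPLIT_LAYER;
    mp.main_gpu     = 0;
    return mp;
}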
@@ -360,7 +360,10 @@ struct test_case {
         // check if backends support op
         bool supported = true;
         for (ggml_backend_t backend : {backend1, backend2}) {
-            if (!ggml_backend_supports_op(backend, out)) {
+            if (
+                !ggml_backend_supports_op(backend, out)
+                || (op_desc(out) == "MOE" && !strcmp(ggml_backend_name(backend), "Kompute"))
+            ) {
                 printf("not supported [%s] ", ggml_backend_name(backend));
                 supported = false;
             }
@@ -376,6 +379,11 @@ struct test_case {

         // allocate
         ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
+        if (buf == NULL) {
+            printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
+            ggml_free(ctx);
+            return false;
+        }

         // build graph
         ggml_build_forward_expand(gf, out);
@@ -463,19 +471,23 @@ struct test_case {
             GGML_UNUSED(index);
         };

-        ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);
+        const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);

-        if (ud.ok) {
-            printf("\033[1;32mOK\033[0m\n");
-        } else {
-            printf("\033[1;31mFAIL\033[0m\n");
+        if (!cmp_ok) {
+            printf("compare failed ");
         }

         ggml_backend_buffer_free(buf);

         ggml_free(ctx);

-        return ud.ok;
+        if (ud.ok && cmp_ok) {
+            printf("\033[1;32mOK\033[0m\n");
+            return true;
+        }
+
+        printf("\033[1;31mFAIL\033[0m\n");
+        return false;
     }

     bool eval_perf(ggml_backend_t backend, const char * op_name) {
@@ -519,6 +531,11 @@ struct test_case {

         // allocate
         ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
+        if (buf == NULL) {
+            printf("failed to allocate tensors\n");
+            ggml_free(ctx);
+            return false;
+        }

         // randomize tensors
         initialize_tensors(ctx);