Add vectorized loading and zeropadding for matrix multiplication
parent 8d351b8bd8 · commit e4903957ec
5 changed files with 423 additions and 207 deletions

ggml-vulkan.cpp (500)
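Per the commit title, the matmul shaders (changed in the other files of this commit) now use vectorized loads, and to support that the rows of the device-side input matrices are presumably stored at a stride rounded up to each pipeline's new `align` value, with the padding zero-filled so the extra K elements contribute nothing to the dot products. A minimal sketch of the rounding arithmetic, reusing the `CEIL_DIV` macro that already exists in ggml-vulkan.cpp and the `ggml_vk_align_size` helper this diff adds:

// Padding arithmetic as introduced in this commit (CEIL_DIV mirrors the
// existing macro in ggml-vulkan.cpp).
#include <cstddef>

#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

static inline size_t ggml_vk_align_size(size_t width, size_t align) {
    return CEIL_DIV(width, align) * align;
}

// Example: a row of 97 elements bound to the large matmul pipeline (align = 128)
// is stored with stride ggml_vk_align_size(97, 128) = 128; elements 97..127 are
// zero-filled, matching the new ggml_vk_test_buffer_write_zeropad(233, 97, 128) test.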
--- a/ggml-vulkan.cpp
+++ b/ggml-vulkan.cpp
@@ -84,6 +84,7 @@ struct vk_pipeline {
     uint32_t push_constant_size;
     uint32_t parameter_count;
     std::array<uint32_t, 3> wg_denoms;
+    uint32_t align;
 };

 struct vk_queue {
@@ -142,9 +143,9 @@ bool vk_fp16_support = false;

 static std::vector<std::tuple<void*, size_t, vk_buffer>> vk_buf_list;

-static vk_pipeline ggml_vk_create_pipeline(const std::string& path, const std::string& entrypoint, uint32_t parameter_count, uint32_t push_constant_count, std::array<uint32_t, 3> wg_denoms, std::vector<int>&& specialization_constants) {
+static vk_pipeline ggml_vk_create_pipeline(const std::string& path, const std::string& entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, std::vector<int>&& specialization_constants, uint32_t align) {
 #ifdef VK_DEBUG
-    std::cerr << "ggml_vk_create_pipeline(" << path << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_count << ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants)" << std::endl;
+    std::cerr << "ggml_vk_create_pipeline(" << path << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << ")" << std::endl;
 #endif
     GGML_ASSERT(parameter_count > 0);
     GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0);
@@ -153,8 +154,9 @@ static vk_pipeline ggml_vk_create_pipeline(const std::string& path, const std::s

     pipeline.name = path;
     pipeline.parameter_count = parameter_count;
-    pipeline.push_constant_size = push_constant_count * sizeof(int);
+    pipeline.push_constant_size = push_constant_size;
     pipeline.wg_denoms = wg_denoms;
+    pipeline.align = align;

     std::vector<char> matmul_shader_contents;
     if (std::ifstream shader_file{ path, std::ios::binary | std::ios::ate }) {
@@ -446,7 +448,7 @@ static vk_buffer ggml_vk_create_buffer(size_t size, VmaAllocationCreateFlags all
     return buf;
 }

-static void ggml_vk_sync_buffers(vk::CommandBuffer& cmd_buffer, std::vector<vk_buffer>&& buffers, vk_queue& q, vk::AccessFlags&& src_mask, vk::AccessFlags&& dst_mask) {
+static void ggml_vk_sync_buffers(vk::CommandBuffer& cmd_buffer, std::vector<vk_buffer>&& buffers, vk_queue& q, vk::AccessFlags&& src_mask, vk::AccessFlags&& dst_mask, bool force_sync) {
 #ifdef VK_DEBUG
     std::cerr << "ggml_vk_sync_buffers()" << std::endl;
 #endif
@@ -460,11 +462,12 @@ static void ggml_vk_sync_buffers(vk::CommandBuffer& cmd_buffer, std::vector<vk_b
             sfi = buf.qf_owner;
             dfi = q.queue_family_index;
             buf.qf_owner = dfi;
-        } else {
+            bmem_barriers.push_back({ src_mask, dst_mask, sfi, dfi, buf.buffer, 0, VK_WHOLE_SIZE });
+        } else if (force_sync) {
             sfi = vk::QueueFamilyIgnored;
             dfi = vk::QueueFamilyIgnored;
+            bmem_barriers.push_back({ src_mask, dst_mask, sfi, dfi, buf.buffer, 0, VK_WHOLE_SIZE });
         }
-        bmem_barriers.push_back({ src_mask, dst_mask, sfi, dfi, buf.buffer, 0, VK_WHOLE_SIZE });
     }

     if (bmem_barriers.empty()) {
@@ -506,6 +509,7 @@ static void ggml_vk_destroy_buffer(vk_buffer& buf) {
 void ggml_vk_test_transfer(size_t ne);
 void ggml_vk_test_matmul_f32(size_t m, size_t n, size_t k, size_t num_it, int split_k, int shader_size);
 void ggml_vk_test_matmul_f16(size_t m, size_t n, size_t k, size_t num_it, int split_k, int shader_size);
+void ggml_vk_test_buffer_write_zeropad(size_t m, size_t k, size_t align);

 void ggml_vk_init(void) {
 #ifdef VK_DEBUG
@@ -527,7 +531,7 @@ void ggml_vk_init(void) {
     };
     vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags(), &app_info, layers, extensions);
 #ifdef VK_VALIDATE
-    const std::vector<vk::ValidationFeatureEnableEXT> features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
+    const std::vector<vk::ValidationFeatureEnableEXT> features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices, vk::ValidationFeatureEnableEXT::eSynchronizationValidation };
     vk::ValidationFeaturesEXT validation_features = {
         features_enable,
         {},
@@ -637,18 +641,18 @@ void ggml_vk_init(void) {
     auto warptile_s = { 32, 32, 32, 8, 32, 32, 2, 2, 2 };

     // Shaders
-    vk_pipeline_matmul_f32_l = ggml_vk_create_pipeline("vk_shaders/matmul_f32.spv", "main", 3, 7, {128, 128, 1}, warptile_l);
-    vk_pipeline_matmul_f32_m = ggml_vk_create_pipeline("vk_shaders/matmul_f32.spv", "main", 3, 7, { 64, 64, 1}, warptile_m);
-    vk_pipeline_matmul_f32_s = ggml_vk_create_pipeline("vk_shaders/matmul_f32.spv", "main", 3, 7, { 32, 32, 1}, warptile_s);
+    vk_pipeline_matmul_f32_l = ggml_vk_create_pipeline("vk_shaders/matmul_f32.spv", "main", 3, 7 * sizeof(int), {128, 128, 1}, warptile_l, 128);
+    vk_pipeline_matmul_f32_m = ggml_vk_create_pipeline("vk_shaders/matmul_f32.spv", "main", 3, 7 * sizeof(int), { 64, 64, 1}, warptile_m, 64);
+    vk_pipeline_matmul_f32_s = ggml_vk_create_pipeline("vk_shaders/matmul_f32.spv", "main", 3, 7 * sizeof(int), { 32, 32, 1}, warptile_s, 32);
     if (vk_fp16_support) {
-        vk_pipeline_matmul_f16_l = ggml_vk_create_pipeline("vk_shaders/matmul_f16.spv", "main", 3, 7, {128, 128, 1}, warptile_l);
-        vk_pipeline_matmul_f16_m = ggml_vk_create_pipeline("vk_shaders/matmul_f16.spv", "main", 3, 7, { 64, 64, 1}, warptile_m);
-        vk_pipeline_matmul_f16_s = ggml_vk_create_pipeline("vk_shaders/matmul_f16.spv", "main", 3, 7, { 32, 32, 1}, warptile_s);
+        vk_pipeline_matmul_f16_l = ggml_vk_create_pipeline("vk_shaders/matmul_f16.spv", "main", 3, 7 * sizeof(int), {128, 128, 1}, warptile_l, 128);
+        vk_pipeline_matmul_f16_m = ggml_vk_create_pipeline("vk_shaders/matmul_f16.spv", "main", 3, 7 * sizeof(int), { 64, 64, 1}, warptile_m, 64);
+        vk_pipeline_matmul_f16_s = ggml_vk_create_pipeline("vk_shaders/matmul_f16.spv", "main", 3, 7 * sizeof(int), { 32, 32, 1}, warptile_s, 32);
     }
-    vk_pipeline_matmul_split_k_reduce = ggml_vk_create_pipeline("vk_shaders/matmul_split_k_reduce.spv", "main", 1, 3, {32, 32, 1}, {});
+    vk_pipeline_matmul_split_k_reduce = ggml_vk_create_pipeline("vk_shaders/matmul_split_k_reduce.spv", "main", 1, 3 * sizeof(int), {32, 32, 1}, {}, 1);

-    vk_pipeline_f16_to_f32 = ggml_vk_create_pipeline("vk_shaders/f16_to_f32.spv", "main", 2, 1, {64, 1, 1}, {});
-    vk_pipeline_dequant_q4_0 = ggml_vk_create_pipeline("vk_shaders/dequant_q4_0.spv", "main", 2, 1, {256*32, 1, 1}, {}); // Group size * values per quant group
+    vk_pipeline_f16_to_f32 = ggml_vk_create_pipeline("vk_shaders/f16_to_f32.spv", "main", 2, 4 * sizeof(int), {64, 1, 1}, {}, 1);
+    vk_pipeline_dequant_q4_0 = ggml_vk_create_pipeline("vk_shaders/dequant_q4_0.spv", "main", 2, 4 * sizeof(int), {256*32, 1, 1}, {}, 1);

     // Queues
     vk_compute_queue = ggml_vk_create_queue(compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader });
@@ -657,27 +661,31 @@ void ggml_vk_init(void) {
     }

 #if defined(VK_CHK_KERNEL)
+    ggml_vk_test_buffer_write_zeropad(233, 97, 128);
+    ggml_vk_test_buffer_write_zeropad(233, 97, 1);
+    ggml_vk_test_buffer_write_zeropad(256, 128, 1);
+
     int step = 16;
     for (size_t m = step; m < 64; m += step) {
         ggml_vk_test_transfer(1024 * 1024 * m);
     }
     const std::vector<size_t> vals {
+        128, 110, 622,
+        511, 511, 127,
+        511, 511, 7,
+        511, 511, 17,
         49, 49, 128,
         128, 49, 49,
         4096, 49, 4096,
         11008, 49, 4096,
         4096, 49, 11008,
-        4096, 49, 4096,
         32000, 49, 4096,
         512, 512, 128,
         128, 512, 512,
         4096, 512, 4096,
         11008, 512, 4096,
         4096, 512, 11008,
-        4096, 512, 4096,
         32000, 512, 4096,
-        512, 512, 128,
-        128, 512, 512,
     };
     for (size_t i = 0; i < vals.size(); i += 3) {
         ggml_vk_test_matmul_f32(vals[i], vals[i + 1], vals[i + 2], 1000, 1, 0);
@@ -832,11 +840,13 @@ void ggml_vk_host_free(void* ptr) {
     std::cerr << "ggml_vk_host_free()" << std::endl;
 #endif
     vk_buffer* buf = nullptr;
+    size_t index;
     for (size_t i = 0; i < vk_buf_list.size(); i++) {
         const uint8_t* addr = (const uint8_t*) std::get<0>(vk_buf_list[i]);
         const uint8_t* endr = addr + std::get<1>(vk_buf_list[i]);
         if (ptr >= addr && ptr < endr) {
             buf = &std::get<2>(vk_buf_list[i]);
+            index = i;
             break;
         }
     }
@@ -846,6 +856,8 @@ void ggml_vk_host_free(void* ptr) {
     }

     ggml_vk_destroy_buffer(*buf);
+
+    vk_buf_list.erase(vk_buf_list.begin() + index);
 }

 static vk_submission ggml_vk_begin_submission(vk_queue& q) {
@@ -856,7 +868,7 @@ static vk_submission ggml_vk_begin_submission(vk_queue& q) {
     return s;
 }

-static void ggml_vk_dispatch_pipeline(vk_submission& s, vk_pipeline& pipeline, std::vector<vk_buffer>&& buffers, const std::vector<int>&& push_constants, std::array<uint32_t, 3> elements, vk_queue& q) {
+static void ggml_vk_dispatch_pipeline(vk_submission& s, vk_pipeline& pipeline, std::vector<vk_buffer> buffers, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements, vk_queue& q) {
     uint32_t wg0 = CEIL_DIV(elements[0], pipeline.wg_denoms[0]);
     uint32_t wg1 = CEIL_DIV(elements[1], pipeline.wg_denoms[1]);
     uint32_t wg2 = CEIL_DIV(elements[2], pipeline.wg_denoms[2]);
@@ -874,9 +886,7 @@ static void ggml_vk_dispatch_pipeline(vk_submission& s, vk_pipeline& pipeline, s

     vk_device.updateDescriptorSets(write_descriptor_sets, {});

-    ggml_vk_sync_buffers(s.buffer, std::move(buffers), q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryRead);
-
-    s.buffer.pushConstants<int>(pipeline.layout, vk::ShaderStageFlagBits::eCompute, 0, push_constants);
+    s.buffer.pushConstants(pipeline.layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size, push_constants);
     s.buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline.pipeline);
     s.buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
                                 pipeline.layout,
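With the change above, ggml_vk_dispatch_pipeline takes the push constants as an opaque pointer plus a byte size and no longer records a buffer barrier itself; callers sync explicitly and then pass whatever layout their shader expects. A fragment-level sketch of the calling pattern, taken directly from the ggml_vk_matmul code further down in this diff (no new names introduced here):

// Fragment: callers now sync explicitly, pack the constants contiguously and
// hand the dispatch helper the raw byte size and pointer.
ggml_vk_sync_buffers(s.buffer, { a, b }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eShaderRead, false);
ggml_vk_sync_buffers(s.buffer, { d }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eShaderWrite, false);
const std::vector<int> pc = { m, n, k, stride_a, stride_b, stride_d, k };
ggml_vk_dispatch_pipeline(s, pipeline, { a, b, d }, pc.size() * sizeof(int), pc.data(), { (uint32_t)m, (uint32_t)n, 1 }, q);

Since only raw bytes cross the boundary now, a packed struct of ints would work equally well in place of the vector.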
@@ -903,11 +913,11 @@ static vk_sequence ggml_vk_buffer_write_2d_async(vk_buffer* dst, size_t offset,
     // Buffer is already mapped
     if(mem_prop_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
         std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
+        GGML_ASSERT(false);
     }
     // Check if src is pinned memory
     vk_buffer* buf = nullptr;
     size_t buf_offset = 0;
-    PROFILE("ggml_vk_buffer_write pinned check",
     for (size_t i = 0; i < vk_buf_list.size(); i++) {
         const uint8_t* addr = (const uint8_t*) std::get<0>(vk_buf_list[i]);
         const uint8_t* endr = addr + std::get<1>(vk_buf_list[i]);
@@ -917,22 +927,29 @@ static vk_sequence ggml_vk_buffer_write_2d_async(vk_buffer* dst, size_t offset,
             break;
         }
     }
-    );

     vk_submission s = ggml_vk_create_submission(q, std::move(wait_semaphores), std::move(signal_semaphores));

     if (buf != nullptr) {
         // Memory is pinned, use as staging buffer
-        std::vector<VkBufferCopy> slices(height);
-        for (size_t i = 0; i < height; i++) {
-            slices[i].srcOffset = buf_offset + i * spitch;
-            slices[i].dstOffset = offset + i * width;
-            slices[i].size = width;
+        std::vector<vk::BufferCopy> slices(1);
+        if (width == spitch) {
+            // Only do single write if stride is equal
+            slices[0].srcOffset = buf_offset;
+            slices[0].dstOffset = offset;
+            slices[0].size = width * height;
+        } else {
+            slices.resize(height);
+            for (size_t i = 0; i < height; i++) {
+                slices[i].srcOffset = buf_offset + i * spitch;
+                slices[i].dstOffset = offset + i * width;
+                slices[i].size = width;
+            }
         }

         s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
-        ggml_vk_sync_buffers(s.buffer, { *dst }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eMemoryWrite);
-        vkCmdCopyBuffer(s.buffer, buf->buffer, dst->buffer, height, slices.data());
+        ggml_vk_sync_buffers(s.buffer, { *dst }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eMemoryWrite, false);
+        s.buffer.copyBuffer(buf->buffer, dst->buffer, slices);
         s.buffer.end();
         return { s };
     }
@@ -953,12 +970,16 @@ static vk_sequence ggml_vk_buffer_write_2d_async(vk_buffer* dst, size_t offset,
         width * height};

     s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
-    ggml_vk_sync_buffers(s.buffer, { *dst }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eMemoryWrite);
+    ggml_vk_sync_buffers(s.buffer, { *dst }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eMemoryWrite, false);
     vkCmdCopyBuffer(s.buffer, dst->sb_write->buffer, dst->buffer, 1, &buf_copy);
     s.buffer.end();

-    for (size_t i = 0; i < height; i++) {
-        memcpy((uint8_t *)dst->sb_write->info.pMappedData + offset + i * width, (const uint8_t *) src + i * spitch, width);
+    if (width == spitch) {
+        memcpy(dst->sb_write->info.pMappedData, src, width * height);
+    } else {
+        for (size_t i = 0; i < height; i++) {
+            memcpy((uint8_t *)dst->sb_write->info.pMappedData + offset + i * width, (const uint8_t *) src + i * spitch, width);
+        }
     }

     return { s };
@@ -975,11 +996,9 @@ static void ggml_vk_buffer_write_2d(vk_buffer* dst, size_t offset, const void *
     if(mem_prop_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
         GGML_ASSERT(mem_prop_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);

-        PROFILE("ggml_vk_buffer_write visible",
         for (size_t i = 0; i < height; i++) {
             memcpy((uint8_t *)dst->info.pMappedData + offset + i * width, (const uint8_t *) src + i * spitch, width);
         }
-        );
     } else {
         vk::Fence fence = vk_device.createFence({});
         std::vector<vk_sequence> s = { ggml_vk_buffer_write_2d_async(dst, offset, src, spitch, width, height, q, {}, {}) };
@@ -988,6 +1007,112 @@ static void ggml_vk_buffer_write_2d(vk_buffer* dst, size_t offset, const void *
     }
 }

+static inline size_t ggml_vk_align_size(size_t width, size_t align) {
+    return CEIL_DIV(width, align) * align;
+}
+
+static vk_sequence ggml_vk_buffer_write_2d_async_zeropad(vk_buffer* dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, size_t align, vk_queue& q, std::vector<vk::Semaphore>&& wait_semaphores, std::vector<vk::Semaphore>&& signal_semaphores) {
+#ifdef VK_DEBUG
+    std::cerr << "ggml_vk_buffer_write_2d_async_zeropad(" << offset << ", " << spitch << ", " << width << ", " << height << ", " << align << ")" << std::endl;
+#endif
+    VkMemoryPropertyFlags mem_prop_flags;
+    vmaGetAllocationMemoryProperties(vk_allocator, dst->allocation, &mem_prop_flags);
+
+    // Buffer is already mapped
+    if(mem_prop_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
+        std::cerr << "ggml_vulkan: buffer_write_2d_async_zeropad dst buffer is host_visible. Use synchronous write." << std::endl;
+        GGML_ASSERT(false);
+    }
+    // Check if src is pinned memory
+    vk_buffer* buf = nullptr;
+    size_t buf_offset = 0;
+    for (size_t i = 0; i < vk_buf_list.size(); i++) {
+        const uint8_t* addr = (const uint8_t*) std::get<0>(vk_buf_list[i]);
+        const uint8_t* endr = addr + std::get<1>(vk_buf_list[i]);
+        if (src >= addr && src < endr) {
+            buf = &std::get<2>(vk_buf_list[i]);
+            buf_offset = ((const uint8_t *)src) - addr;
+            break;
+        }
+    }
+
+    // Align slices to the value of align
+    const uint32_t padded_width = ggml_vk_align_size(width, align);
+
+    if (buf != nullptr) {
+        vk_submission s = ggml_vk_create_submission(q, std::move(wait_semaphores), std::move(signal_semaphores));
+
+        std::vector<vk::BufferCopy> slices(1);
+        if (width == padded_width && width == spitch) {
+            // Only do single write if no padding happens
+            slices[0].srcOffset = buf_offset;
+            slices[0].dstOffset = offset;
+            slices[0].size = width * height;
+        } else {
+            slices.resize(height);
+            for (size_t i = 0; i < height; i++) {
+                slices[i].srcOffset = buf_offset + i * spitch;
+                slices[i].dstOffset = offset + i * padded_width;
+                slices[i].size = width;
+            }
+        }
+
+        s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
+        ggml_vk_sync_buffers(s.buffer, { *dst }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eMemoryWrite, false);
+        if (padded_width > width) {
+            s.buffer.fillBuffer(dst->buffer, 0, VK_WHOLE_SIZE, 0);
+        }
+        s.buffer.pipelineBarrier(
+            q.stage_flags,
+            q.stage_flags,
+            {},
+            {},
+            {
+                { vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryWrite, vk::QueueFamilyIgnored, vk::QueueFamilyIgnored, dst->buffer, 0, VK_WHOLE_SIZE }
+            },
+            {}
+        );
+        s.buffer.copyBuffer(buf->buffer, dst->buffer, slices);
+        s.buffer.end();
+        return { s };
+    }
+
+    // Staging buffer required, malloc because of async transfer
+    if (dst->sb_write == nullptr) {
+        dst->sb_write = new vk_buffer;
+        *dst->sb_write = ggml_vk_create_buffer(dst->size, VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_HOST, 0);
+    }
+
+    vk_submission s = ggml_vk_create_submission(q, std::move(wait_semaphores), std::move(signal_semaphores));
+
+    VkMemoryPropertyFlags mpf_staging;
+    vmaGetAllocationMemoryProperties(vk_allocator, dst->sb_write->allocation, &mpf_staging);
+    GGML_ASSERT(mpf_staging & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
+
+    vk::BufferCopy buf_copy = {
+        0,
+        offset,
+        padded_width * height};
+
+    s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
+    ggml_vk_sync_buffers(s.buffer, { *dst }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eTransferWrite, false);
+    s.buffer.copyBuffer(dst->sb_write->buffer, dst->buffer, { buf_copy });
+    s.buffer.end();
+
+    const size_t zeropad = padded_width - width;
+
+    if (width == padded_width && width == spitch) {
+        memcpy(dst->sb_write->info.pMappedData, src, width * height);
+    } else {
+        for (size_t i = 0; i < height; i++) {
+            memcpy((uint8_t *)dst->sb_write->info.pMappedData + i * padded_width, (const uint8_t *) src + i * spitch, width);
+            memset((uint8_t *)dst->sb_write->info.pMappedData + i * padded_width + width, 0, zeropad);
+        }
+    }
+
+    return { s };
+}
+
 static vk_sequence ggml_vk_buffer_write_async(vk_buffer* dst, size_t offset, const void * src, size_t size, vk_queue& q, std::vector<vk::Semaphore>&& wait_semaphores, std::vector<vk::Semaphore>&& signal_semaphores) {
 #ifdef VK_DEBUG
     std::cerr << "ggml_vk_buffer_write_async(" << size << ")" << std::endl;
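The new ggml_vk_buffer_write_2d_async_zeropad above places each source row at the padded stride and clears the tail, either on the GPU (fillBuffer before the strided copies when the source is pinned) or on the CPU while filling the staging buffer. A self-contained sketch of the staging-buffer layout it produces; the helper name here is illustrative, not part of the diff:

// Standalone sketch: copy a height x width matrix (row pitch spitch bytes)
// into a buffer whose rows are padded_width bytes apart, zero-filling the tail.
#include <cstdint>
#include <cstring>
#include <vector>

std::vector<uint8_t> pad_rows(const uint8_t * src, size_t spitch,
                              size_t width, size_t height, size_t padded_width) {
    std::vector<uint8_t> dst(padded_width * height, 0);
    for (size_t i = 0; i < height; i++) {
        memcpy(dst.data() + i * padded_width, src + i * spitch, width);
        // bytes [width, padded_width) of each row stay zero
    }
    return dst;
}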
@@ -1031,7 +1156,7 @@ static vk_sequence ggml_vk_buffer_read_async(vk_buffer* src, size_t offset, void

     vk_submission s = ggml_vk_create_submission(q, std::move(wait_semaphores), std::move(signal_semaphores));
     s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
-    ggml_vk_sync_buffers(s.buffer, { *src }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryRead);
+    ggml_vk_sync_buffers(s.buffer, { *src }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryRead, false);
     vkCmdCopyBuffer(s.buffer, src->buffer, buf->buffer, 1, &buf_copy);
     s.buffer.end();

@@ -1073,7 +1198,7 @@ static void ggml_vk_buffer_read(vk_buffer* src, size_t offset, void * dst, size_

     std::vector<vk_sequence> s = { ggml_vk_create_sequence_1(q, {}, {}) };
     s[0][0].buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
-    ggml_vk_sync_buffers(s[0][0].buffer, { *src }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryRead);
+    ggml_vk_sync_buffers(s[0][0].buffer, { *src }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eTransferRead, false);
     vkCmdCopyBuffer(s[0][0].buffer, src->buffer, buf->buffer, 1, &buf_copy);
     s[0][0].buffer.end();
     ggml_vk_submit(q, s, fence);
@@ -1098,7 +1223,7 @@ static void ggml_vk_buffer_read(vk_buffer* src, size_t offset, void * dst, size_
         vk::CommandBuffer cmd_buffer = ggml_vk_create_cmd_buffer(q);
         vk::CommandBufferBeginInfo cmd_buffer_begin_info(vk::CommandBufferUsageFlagBits::eOneTimeSubmit);
         cmd_buffer.begin(cmd_buffer_begin_info);
-        ggml_vk_sync_buffers(cmd_buffer, { *src }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryRead);
+        ggml_vk_sync_buffers(cmd_buffer, { *src }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eTransferRead, false);
         vkCmdCopyBuffer(cmd_buffer, src->buffer, src->sb_read->buffer, 1, &buf_copy);
         cmd_buffer.end();

@@ -1117,7 +1242,7 @@ static void ggml_vk_buffer_read(vk_buffer* src, size_t offset, void * dst, size_
     }
 }

-static vk_sequence ggml_vk_h2d_tensor_2d(vk_buffer* dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, vk_queue& q, std::vector<vk::Semaphore>&& wait_semaphores, std::vector<vk::Semaphore>&& signal_semaphores) {
+static vk_sequence ggml_vk_h2d_tensor_2d(vk_buffer* dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, size_t align, vk_queue& q, std::vector<vk::Semaphore>&& wait_semaphores, std::vector<vk::Semaphore>&& signal_semaphores) {
 #ifdef VK_DEBUG
     std::cerr << "ggml_vk_h2d_tensor_2d()" << std::endl;
 #endif
@@ -1134,10 +1259,11 @@ static vk_sequence ggml_vk_h2d_tensor_2d(vk_buffer* dst, size_t offset, const st

     const void * x = (const void *) ((const char *) src->data + i2*nb2 + i3*nb3);
     if (nb0 == ts && nb1 == row_length) {
-        return ggml_vk_buffer_write_async(dst, offset, x, ne1*nb1, q, std::move(wait_semaphores), std::move(signal_semaphores));
+        // return ggml_vk_buffer_write_async(dst, offset, x, ne1*nb1, q, std::move(wait_semaphores), std::move(signal_semaphores));
+        return ggml_vk_buffer_write_2d_async_zeropad(dst, offset, x, nb1, row_length, ne1, align, q, std::move(wait_semaphores), std::move(signal_semaphores));
     }
     if (nb0 == ts) {
-        return ggml_vk_buffer_write_2d_async(dst, offset, x, nb1, row_length, ne1, q, std::move(wait_semaphores), std::move(signal_semaphores));
+        return ggml_vk_buffer_write_2d_async_zeropad(dst, offset, x, nb1, row_length, ne1, align, q, std::move(wait_semaphores), std::move(signal_semaphores));
     }
     GGML_ASSERT(false);
     // TODO: also needs handling of staging buffers
@@ -1184,30 +1310,26 @@ static vk_pipeline* ggml_vk_guess_matmul_pipeline(bool bit16, int m, int n) {
     return &vk_pipeline_matmul_f32_l;
 }

-static vk_sequence ggml_vk_matmul(vk_pipeline& pipeline, vk_buffer& a, vk_buffer& b, vk_buffer& d, int m, int n, int k, int split_k, vk_queue& q, std::vector<vk::Semaphore>&& wait_semaphores, std::vector<vk::Semaphore>&& signal_semaphores) {
+static vk_sequence ggml_vk_matmul(vk_pipeline& pipeline, vk_buffer& a, vk_buffer& b, vk_buffer& d, int m, int n, int k, int stride_a, int stride_b, int stride_d, int split_k, vk_queue& q, std::vector<vk::Semaphore>&& wait_semaphores, std::vector<vk::Semaphore>&& signal_semaphores) {
 #ifdef VK_DEBUG
     std::cerr << "ggml_vk_matmul(" << m << ", " << n << ", " << k << ")" << std::endl;
 #endif
     vk_submission s = ggml_vk_begin_submission(q);
+    ggml_vk_sync_buffers(s.buffer, { a, b }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eShaderRead, false);
+    ggml_vk_sync_buffers(s.buffer, { d }, q, vk::AccessFlagBits::eMemoryRead, vk::AccessFlagBits::eShaderWrite, false);
     if (split_k == 1) {
-        ggml_vk_dispatch_pipeline(s, pipeline, { a, b, d }, { m, n, k, k, k, m, k }, { (uint32_t)m, (uint32_t)n, 1 }, q);
+        const std::vector<int> pc = { m, n, k, stride_a, stride_b, stride_d, k };
+        ggml_vk_dispatch_pipeline(s, pipeline, { a, b, d }, pc.size() * sizeof(int), pc.data(), { (uint32_t)m, (uint32_t)n, 1 }, q);
         ggml_vk_end_submission(s, std::move(wait_semaphores), std::move(signal_semaphores));
         return { s };
     }

     // Synchronize the two submissions
-    ggml_vk_dispatch_pipeline(s, pipeline, { a, b, d }, { m, n, k, k, k, m, CEIL_DIV(k, split_k) }, { (uint32_t)m * split_k, (uint32_t)n, 1 }, q);
-    s.buffer.pipelineBarrier(
-        q.stage_flags,
-        q.stage_flags,
-        {},
-        {},
-        {
-            { vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite, vk::QueueFamilyIgnored, vk::QueueFamilyIgnored, d.buffer, 0, VK_WHOLE_SIZE } },
-        {}
-    );
-    ggml_vk_dispatch_pipeline(s, vk_pipeline_matmul_split_k_reduce, { d }, { m, n, split_k }, { (uint32_t)m, (uint32_t)n, 1 }, q);
+    const std::vector<int> pc1 = { m, n, k, stride_a, stride_b, stride_d, CEIL_DIV(stride_a, split_k) };
+    ggml_vk_dispatch_pipeline(s, pipeline, { a, b, d }, pc1.size() * sizeof(int), pc1.data(), { (uint32_t)m * split_k, (uint32_t)n, 1 }, q);
+    ggml_vk_sync_buffers(s.buffer, { d }, q, vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite, true);
+    const std::vector<int> pc2 = { m, n, split_k };
+    ggml_vk_dispatch_pipeline(s, vk_pipeline_matmul_split_k_reduce, { d }, pc2.size() * sizeof(int), pc2.data(), { (uint32_t)m, (uint32_t)n, 1 }, q);
     ggml_vk_end_submission(s, std::move(wait_semaphores), std::move(signal_semaphores));

     return { s };
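For split_k > 1 the rewritten ggml_vk_matmul above launches m * split_k workgroups, each covering a CEIL_DIV(stride_a, split_k)-wide slice of K, forces a barrier on d, and then runs vk_pipeline_matmul_split_k_reduce with { m, n, split_k } as push constants. The reduce shader is not part of this file; given that d_D is allocated as d_ne * split_k floats, it presumably sums the partial results, and the reference below assumes the partial M x N blocks are stored back to back:

// Reference for what the split-k reduce is assumed to compute (assumption,
// not taken from this diff): sum the split_k partial results into slice 0.
void split_k_reduce_reference(float * d, int m, int n, int split_k) {
    for (int i = 0; i < m * n; i++) {
        float sum = 0.0f;
        for (int s = 0; s < split_k; s++) {
            sum += d[s * m * n + i];
        }
        d[i] = sum;
    }
}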
@@ -1215,7 +1337,9 @@ static vk_sequence ggml_vk_matmul(vk_pipeline& pipeline, vk_buffer& a, vk_buffer

 static void ggml_vk_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
 #ifdef VK_DEBUG
-    std::cerr << "ggml_vk_mul_mat_f32()" << std::endl;
+    std::cerr << "ggml_vk_mul_mat_f32((type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3];
+    std::cerr << "), (type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3];
+    std::cerr << "), (type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << "),)" << std::endl;
 #endif
     const int64_t ne00 = src0->ne[0];
     const int64_t ne01 = src0->ne[1];
@@ -1228,22 +1352,22 @@ static void ggml_vk_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
     const int nb2 = dst->nb[2];
     const int nb3 = dst->nb[3];

-    const int x_ne = ne01 * ne00;
-    const int y_ne = ne11 * ne10;
     const int d_ne = ne11 * ne01;

     const int split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
     vk_pipeline * pipeline = ggml_vk_guess_matmul_pipeline(false, ne01, ne11);

+    const int kpad = ggml_vk_align_size(ne10, pipeline->align);
+
     vk_buffer d_X;
     vk_buffer d_Y;
     vk_buffer d_D;
     if (src0->backend == GGML_BACKEND_GPU) {
         d_X = *(vk_buffer*) src0->data;
     } else {
-        ggml_vk_pool_malloc(sizeof(float) * x_ne, &d_X, 0);
+        ggml_vk_pool_malloc(sizeof(float) * kpad * ne01, &d_X, 0);
     }
-    ggml_vk_pool_malloc(sizeof(float) * y_ne, &d_Y, 0);
+    ggml_vk_pool_malloc(sizeof(float) * kpad * ne11, &d_Y, 0);
     ggml_vk_pool_malloc(sizeof(float) * d_ne * split_k, &d_D, 0);

     std::vector<vk_sequence> compute_seqs;
@@ -1268,20 +1392,20 @@ static void ggml_vk_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
                 s_x = ggml_vk_create_semaphore(vk_compute_queue);
                 semaphores.push_back(s_x);
                 if (first) {
-                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, vk_transfer_queues[0], {}, { s_x }));
+                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, pipeline->align * sizeof(float), vk_transfer_queues[0], {}, { s_x }));
                 } else {
                     // Wait for previous matmul to be done before writing to the input buffers again
-                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, vk_transfer_queues[0], { s_it_x }, { s_x }));
+                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, pipeline->align * sizeof(float), vk_transfer_queues[0], { s_it_x }, { s_x }));
                 }
             }

             ggml_vk_submit(vk_transfer_queues[0], transfer_0_seqs, VK_NULL_HANDLE);

             if (first) {
-                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, vk_transfer_queues[1], {}, { s_y }));
+                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, pipeline->align * sizeof(float), vk_transfer_queues[1], {}, { s_y }));
             } else {
                 // Wait for previous matmul to be done before writing to the input buffers again
-                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, vk_transfer_queues[1], { s_it_y }, { s_y }));
+                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, pipeline->align * sizeof(float), vk_transfer_queues[1], { s_it_y }, { s_y }));
             }

             // compute
@@ -1291,13 +1415,13 @@ static void ggml_vk_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
                 if (load_x) {
                     s_it_x = ggml_vk_create_semaphore(vk_compute_queue);
                     s_it_y = ggml_vk_create_semaphore(vk_compute_queue);
-                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_x, s_it_y }));
+                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_x, s_it_y }));
                 } else {
                     s_it_y = ggml_vk_create_semaphore(vk_compute_queue);
-                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_y }));
+                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_y }));
                 }
             } else {
-                compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm }));
+                compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm }));
             }

             // copy dst to host
@@ -1325,14 +1449,15 @@ static void ggml_vk_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr

 static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata) {
 #ifdef VK_DEBUG
-    std::cerr << "ggml_vk_mul_mat_f16()" << std::endl;
+    std::cerr << "ggml_vk_mul_mat_f16((type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3];
+    std::cerr << "), (type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3];
+    std::cerr << "), (type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << "),)" << std::endl;
 #endif
     GGML_ASSERT(vk_fp16_support);

     GGML_ASSERT(src0->type == GGML_TYPE_F16);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);

-    const int64_t ne00 = src0->ne[0];
     const int64_t ne01 = src0->ne[1];
     const int64_t ne02 = src0->ne[2];
     const int64_t ne03 = src0->ne[3];
@@ -1348,22 +1473,22 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
     const int nb2 = dst->nb[2];
     const int nb3 = dst->nb[3];

-    const int x_ne = ne01 * ne00;
-    const int y_ne = ne11 * ne10;
     const int d_ne = ne11 * ne01;

     const int split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
     vk_pipeline * pipeline = ggml_vk_guess_matmul_pipeline(true, ne01, ne11);

+    const int kpad = ggml_vk_align_size(ne10, pipeline->align);
+
     vk_buffer d_X;
     vk_buffer d_Y;
     vk_buffer d_D;
     if (src0->backend == GGML_BACKEND_GPU) {
         d_X = *(vk_buffer*) src0->data;
     } else {
-        ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &d_X, 0);
+        ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * kpad * ne01, &d_X, 0);
     }
-    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * y_ne, &d_Y, 0);
+    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * kpad * ne11, &d_Y, 0);
     ggml_vk_pool_malloc(sizeof(float) * d_ne * split_k, &d_D, 0);

     bool src1_cont_rows = nb10 == sizeof(float);
@@ -1392,10 +1517,10 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
                 s_x = ggml_vk_create_semaphore(vk_compute_queue);
                 semaphores.push_back(s_x);
                 if (first) {
-                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, vk_transfer_queues[0], {}, { s_x }));
+                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, pipeline->align * sizeof(ggml_fp16_t), vk_transfer_queues[0], {}, { s_x }));
                 } else {
                     // Wait for previous matmul to be done before writing to the input buffers again
-                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, vk_transfer_queues[0], { s_it_x }, { s_x }));
+                    transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_X, 0, src0, i03, i02, pipeline->align * sizeof(ggml_fp16_t), vk_transfer_queues[0], { s_it_x }, { s_x }));
                 }
             }

@@ -1403,6 +1528,7 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr

             // convert src1 to fp16
             // TODO: use multiple threads
+            // TODO: This memory isn't pinned
             ggml_fp16_t * const tmp = (ggml_fp16_t *) wdata + (ne11 * ne10) * (i03 * ne02 + i02);
             char * src1i = (char *) src1->data + i03*nb13 + i02*nb12;
             if (src1_cont_rows) {
@@ -1414,8 +1540,7 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
                         ggml_fp32_to_fp16_row((float *) (src1i + i01*nb11), tmp + i01*ne10, ne10);
                     }
                 }
-            }
-            else {
+            } else {
                 for (int64_t i01 = 0; i01 < ne11; i01++) {
                     for (int64_t i00 = 0; i00 < ne10; i00++) {
                         // very slow due to no inlining
@@ -1425,10 +1550,10 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
             }

             if (first) {
-                transfer_1_seqs.push_back(ggml_vk_buffer_write_async(&d_Y, 0, tmp, sizeof(ggml_fp16_t) * y_ne, vk_transfer_queues[1], {}, { s_y }));
+                transfer_1_seqs.push_back(ggml_vk_buffer_write_2d_async_zeropad(&d_Y, 0, tmp, sizeof(ggml_fp16_t) * ne10, sizeof(ggml_fp16_t) * ne10, ne11, pipeline->align * sizeof(ggml_fp16_t), vk_transfer_queues[1], {}, { s_y }));
             } else {
                 // Wait for previous matmul to be done before writing to the input buffers again
-                transfer_1_seqs.push_back(ggml_vk_buffer_write_async(&d_Y, 0, tmp, sizeof(ggml_fp16_t) * y_ne, vk_transfer_queues[1], { s_it_y }, { s_y }));
+                transfer_1_seqs.push_back(ggml_vk_buffer_write_2d_async_zeropad(&d_Y, 0, tmp, sizeof(ggml_fp16_t) * ne10, sizeof(ggml_fp16_t) * ne10, ne11, pipeline->align * sizeof(ggml_fp16_t), vk_transfer_queues[1], { s_it_y }, { s_y }));
             }

             // compute
@@ -1437,13 +1562,13 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
                 if (load_x) {
                     s_it_x = ggml_vk_create_semaphore(vk_compute_queue);
                     s_it_y = ggml_vk_create_semaphore(vk_compute_queue);
-                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_x, s_it_y }));
+                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_x, s_it_y }));
                 } else {
                     s_it_y = ggml_vk_create_semaphore(vk_compute_queue);
-                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_y }));
+                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_y }));
                 }
             } else {
-                compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm }));
+                compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm }));
             }

             // copy dst to host
@@ -1456,7 +1581,6 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
             }

             ggml_vk_submit(vk_transfer_queues[0], transfer_0_seqs, VK_NULL_HANDLE);
-            // vk_transfer_queues[0].queue.waitIdle();

             // cleanup waits for the queue to be done
             ggml_vk_queue_cleanup(vk_transfer_queues[0]);
@@ -1472,7 +1596,9 @@ static void ggml_vk_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr

 static void ggml_vk_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
 #ifdef VK_DEBUG
-    std::cerr << "ggml_vk_mul_mat_q_f32()" << std::endl;
+    std::cerr << "ggml_vk_mul_mat_q_f32((type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3];
+    std::cerr << "), (type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3];
+    std::cerr << "), (type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << "),)" << std::endl;
 #endif
     const int64_t ne00 = src0->ne[0];
     const int64_t ne01 = src0->ne[1];
@@ -1495,13 +1621,15 @@ static void ggml_vk_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
     const int split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
     vk_pipeline * pipeline = ggml_vk_guess_matmul_pipeline(false, ne01, ne11);

+    const int kpad = ggml_vk_align_size(ne10, pipeline->align);
+
     vk_buffer d_X;
     vk_buffer d_Y;
     vk_buffer d_D;
     if (!mul_mat_vec) {
-        ggml_vk_pool_malloc(sizeof(float) * x_ne, &d_X, 0);
+        ggml_vk_pool_malloc(sizeof(float) * kpad * ne01, &d_X, 0);
     }
-    ggml_vk_pool_malloc(sizeof(float) * y_ne, &d_Y, 0);
+    ggml_vk_pool_malloc(sizeof(float) * kpad * ne11, &d_Y, 0);
     ggml_vk_pool_malloc(sizeof(float) * d_ne * split_k, &d_D, 0);
     vk_buffer d_Q;
     if (src0->backend == GGML_BACKEND_CPU) {
@@ -1540,10 +1668,10 @@ static void ggml_vk_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
                     s_x = ggml_vk_create_semaphore(vk_compute_queue);
                     q_semaphores.push_back(s_x);
                     if (first) {
-                        transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Q, 0, src0, i03, i02, vk_transfer_queues[0], {}, { s_x }));
+                        transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Q, 0, src0, i03, i02, 1, vk_transfer_queues[0], {}, { s_x }));
                     } else {
                         // Wait for previous dequant to be done before writing to the input buffers again
-                        transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Q, 0, src0, i03, i02, vk_transfer_queues[0], { s_it_x }, { s_x }));
+                        transfer_0_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Q, 0, src0, i03, i02, 1, vk_transfer_queues[0], { s_it_x }, { s_x }));
                     }
                 } else if (src0->backend == GGML_BACKEND_GPU) {
                     d_Q = *(vk_buffer *) src0->data;
@@ -1555,10 +1683,10 @@ static void ggml_vk_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *

             // copy src1 to device
             if (first) {
-                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, vk_transfer_queues[1], {}, { s_y }));
+                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, pipeline->align * sizeof(float), vk_transfer_queues[1], {}, { s_y }));
             } else {
                 // Wait for previous matmul to be done before writing to the input buffers again
-                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, vk_transfer_queues[1], { s_it_y }, { s_y }));
+                transfer_1_seqs.push_back(ggml_vk_h2d_tensor_2d(&d_Y, 0, src1, i03, i02, pipeline->align * sizeof(float), vk_transfer_queues[1], { s_it_y }, { s_y }));
             }

             if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel
@@ -1582,7 +1710,10 @@ static void ggml_vk_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *

                 // convert src0 to fp32 on device
                 vk_submission s = ggml_vk_begin_submission(vk_compute_queue);
-                ggml_vk_dispatch_pipeline(s, *to_fp32_vk, {d_Q, d_X}, { (int)x_ne }, { (uint32_t)x_ne, 1, 1}, vk_compute_queue);
+                const std::vector<int> pc = { (int)ne01, (int)ne10, (int)ne10, kpad };
+                ggml_vk_sync_buffers(s.buffer, { d_Q }, vk_compute_queue, vk::AccessFlagBits::eTransferWrite, vk::AccessFlagBits::eShaderRead, false);
+                ggml_vk_sync_buffers(s.buffer, { d_X }, vk_compute_queue, vk::AccessFlagBits::eShaderRead, vk::AccessFlagBits::eShaderWrite, false);
+                ggml_vk_dispatch_pipeline(s, *to_fp32_vk, {d_Q, d_X}, pc.size() * sizeof(int), pc.data(), { (uint32_t)x_ne, 1, 1}, vk_compute_queue);
                 if (load_x && !last) {
                     s_it_x = ggml_vk_create_semaphore(vk_compute_queue);
                     ggml_vk_end_submission(s, std::move(q_semaphores), { s_q, s_it_x });
@@ -1594,9 +1725,9 @@ static void ggml_vk_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
                 // compute
                 if (!last) {
                     s_it_y = ggml_vk_create_semaphore(vk_compute_queue);
-                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_y }));
+                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm, s_it_y }));
                 } else {
-                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, split_k, vk_compute_queue, std::move(semaphores), { s_mm }));
+                    compute_seqs.push_back(ggml_vk_matmul(*pipeline, d_X, d_Y, d_D, ne01, ne11, ne10, kpad, kpad, ne01, split_k, vk_compute_queue, std::move(semaphores), { s_mm }));
                 }
             }

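Across the three matmul paths above, the device-side input buffers are now sized for the padded stride (kpad * rows elements instead of x_ne / y_ne), and the upload helpers receive the alignment in bytes. Note that the quantized src0 upload passes an alignment of 1, so raw quantized blocks are copied unpadded; only float and fp16 row data gets the padded layout. A self-contained sketch of that sizing (the helper name is illustrative, not from the diff):

// Sketch of the padded allocation size used for d_X / d_Y above.
#include <cstddef>

size_t padded_matrix_bytes(size_t rows, size_t row_len, size_t align, size_t elem_size) {
    const size_t kpad = ((row_len + align - 1) / align) * align; // ggml_vk_align_size(row_len, align)
    return elem_size * kpad * rows;
}

// e.g. the f32 path's d_X: padded_matrix_bytes(ne01, ne10, pipeline->align, sizeof(float))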
@@ -1730,7 +1861,7 @@ void ggml_vk_test_transfer(size_t ne) {

     double kb = ne * sizeof(float) / 1024.0;

-    std::cout << "TEST TRANSFER " << kb << " KB to_gpu " << ms_to_gpu << "ms (" << kb / ms_to_gpu * 1000.0 / 1024.0 << " MB/s) from_gpu " << ms_from_gpu << "ms (" << kb / ms_from_gpu * 1000.0 / 1024.0 << " MB/s) avg_err=" << avg_err / ne << std::endl;
+    std::cerr << "TEST TRANSFER " << kb << " KB to_gpu " << ms_to_gpu << "ms (" << kb / ms_to_gpu * 1000.0 / 1024.0 << " MB/s) from_gpu " << ms_from_gpu << "ms (" << kb / ms_from_gpu * 1000.0 / 1024.0 << " MB/s) avg_err=" << avg_err / ne << std::endl;

     ggml_vk_destroy_buffer(buffer);
@@ -1742,31 +1873,6 @@ void ggml_vk_test_matmul_f32(size_t m, size_t n, size_t k, size_t num_it, int sp
     const size_t y_ne = k * n;
     const size_t d_ne = m * n;

-    vk_buffer d_X;
-    vk_buffer d_Y;
-    vk_buffer d_D;
-    ggml_vk_pool_malloc(sizeof(float) * x_ne, &d_X, 0);
-    ggml_vk_pool_malloc(sizeof(float) * y_ne, &d_Y, 0);
-    ggml_vk_pool_malloc(sizeof(float) * d_ne * split_k, &d_D, 0);
-
-    float* x = (float *) malloc(sizeof(float) * x_ne);
-    float* y = (float *) malloc(sizeof(float) * y_ne);
-    float* d = (float *) malloc(sizeof(float) * d_ne);
-
-    for (size_t i = 0; i < x_ne; i++) {
-        x[i] = rand() / (float)RAND_MAX;
-    }
-    for (size_t i = 0; i < y_ne; i++) {
-        y[i] = rand() / (float)RAND_MAX;
-    }
-
-    ggml_vk_buffer_write(&d_X, 0, x, sizeof(float) * x_ne, vk_transfer_queues[0]);
-    ggml_vk_buffer_write(&d_Y, 0, y, sizeof(float) * y_ne, vk_transfer_queues[1]);
-
-    // Wait for transfers to finish
-    vk_transfer_queues[0].queue.waitIdle();
-    vk_transfer_queues[1].queue.waitIdle();
-
     std::vector<vk_sequence> seq;

     vk_pipeline * p;
@@ -1784,10 +1890,38 @@ void ggml_vk_test_matmul_f32(size_t m, size_t n, size_t k, size_t num_it, int sp
         GGML_ASSERT(0);
     }

+    const size_t kpad = ggml_vk_align_size(k, p->align);
+
+    vk_buffer d_X;
+    vk_buffer d_Y;
+    vk_buffer d_D;
+    ggml_vk_pool_malloc(sizeof(float) * kpad * m, &d_X, 0);
+    ggml_vk_pool_malloc(sizeof(float) * kpad * n, &d_Y, 0);
+    ggml_vk_pool_malloc(sizeof(float) * d_ne * split_k, &d_D, 0);
+
+    float* x = (float *) malloc(sizeof(float) * x_ne);
+    float* y = (float *) malloc(sizeof(float) * y_ne);
+    float* d = (float *) malloc(sizeof(float) * d_ne);
+
+    for (size_t i = 0; i < x_ne; i++) {
+        x[i] = rand() / (float)RAND_MAX;
+    }
+    for (size_t i = 0; i < y_ne; i++) {
+        y[i] = rand() / (float)RAND_MAX;
+    }
+
+    seq.push_back(ggml_vk_buffer_write_2d_async_zeropad(&d_X, 0, x, sizeof(float) * k, sizeof(float) * k, m, sizeof(float) * p->align, vk_transfer_queues[0], {}, {}));
+    seq.push_back(ggml_vk_buffer_write_2d_async_zeropad(&d_Y, 0, y, sizeof(float) * k, sizeof(float) * k, n, sizeof(float) * p->align, vk_transfer_queues[0], {}, {}));
+
+    ggml_vk_submit(vk_transfer_queues[0], seq, VK_NULL_HANDLE);
+
+    // Wait for transfers to finish
+    vk_transfer_queues[0].queue.waitIdle();
+
     auto begin = std::chrono::high_resolution_clock::now();

     for (size_t i = 0; i < num_it; i++) {
-        seq.push_back(ggml_vk_matmul(*p, d_X, d_Y, d_D, m, n, k, split_k, vk_compute_queue, {}, {}));
+        seq.push_back(ggml_vk_matmul(*p, d_X, d_Y, d_D, m, n, k, kpad, kpad, m, split_k, vk_compute_queue, {}, {}));
     }

     ggml_vk_submit(vk_compute_queue, seq, VK_NULL_HANDLE);
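The f32 test now allocates the device matrices with kpad columns and uploads them through ggml_vk_buffer_write_2d_async_zeropad, which copies `height` rows of `width` bytes but spaces the destination rows at the given alignment and zero-fills the gap. A host-side model of the destination layout this is expected to produce (the Vulkan staging mechanics are omitted, and the parameter order only loosely mirrors the call above):

#include <cstddef>
#include <cstring>

// CPU model of the zero-padded 2D write: destination rows are padded to `align` bytes. Sketch only.
static void write_2d_zeropad_ref(char* dst, const char* src,
                                 size_t spitch, size_t width, size_t height, size_t align) {
    const size_t padded = ((width + align - 1) / align) * align;  // destination row pitch in bytes
    for (size_t r = 0; r < height; ++r) {
        std::memcpy(dst + r * padded, src + r * spitch, width);   // copy the payload row
        std::memset(dst + r * padded + width, 0, padded - width); // zero the padding tail
    }
}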
@@ -1840,30 +1974,6 @@ void ggml_vk_test_matmul_f16(size_t m, size_t n, size_t k, size_t num_it, int sp
     const size_t y_ne = k * n;
     const size_t d_ne = m * n;

-    vk_buffer d_X;
-    vk_buffer d_Y;
-    vk_buffer d_D;
-    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &d_X, 0);
-    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * y_ne, &d_Y, 0);
-    ggml_vk_pool_malloc(sizeof(float) * d_ne * split_k, &d_D, 0);
-
-    ggml_fp16_t* x = (ggml_fp16_t *) malloc(sizeof(ggml_fp16_t) * x_ne);
-    ggml_fp16_t* y = (ggml_fp16_t *) malloc(sizeof(ggml_fp16_t) * y_ne);
-    float* d = (float *) malloc(sizeof(float) * d_ne);
-
-    for (size_t i = 0; i < x_ne; i++) {
-        x[i] = ggml_fp32_to_fp16(rand() / (float)RAND_MAX);
-    }
-    for (size_t i = 0; i < y_ne; i++) {
-        y[i] = ggml_fp32_to_fp16(rand() / (float)RAND_MAX);
-    }
-
-    ggml_vk_buffer_write(&d_X, 0, x, sizeof(ggml_fp16_t) * x_ne, vk_transfer_queues[0]);
-    ggml_vk_buffer_write(&d_Y, 0, y, sizeof(ggml_fp16_t) * y_ne, vk_transfer_queues[1]);
-
-    vk_transfer_queues[0].queue.waitIdle();
-    vk_transfer_queues[1].queue.waitIdle();
-
     std::vector<vk_sequence> seq;

     vk_pipeline * p;
@@ -1881,10 +1991,38 @@ void ggml_vk_test_matmul_f16(size_t m, size_t n, size_t k, size_t num_it, int sp
         GGML_ASSERT(0);
     }

+    const size_t kpad = ggml_vk_align_size(k, p->align);
+
+    vk_buffer d_X;
+    vk_buffer d_Y;
+    vk_buffer d_D;
+    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * kpad * m, &d_X, 0);
+    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * kpad * n, &d_Y, 0);
+    ggml_vk_pool_malloc(sizeof(float) * d_ne * split_k, &d_D, 0);
+
+    ggml_fp16_t* x = (ggml_fp16_t *) malloc(sizeof(ggml_fp16_t) * x_ne);
+    ggml_fp16_t* y = (ggml_fp16_t *) malloc(sizeof(ggml_fp16_t) * y_ne);
+    float* d = (float *) malloc(sizeof(float) * d_ne);
+
+    for (size_t i = 0; i < x_ne; i++) {
+        x[i] = ggml_fp32_to_fp16(rand() / (float)RAND_MAX);
+    }
+    for (size_t i = 0; i < y_ne; i++) {
+        y[i] = ggml_fp32_to_fp16(rand() / (float)RAND_MAX);
+    }
+
+    seq.push_back(ggml_vk_buffer_write_2d_async_zeropad(&d_X, 0, x, sizeof(ggml_fp16_t) * k, sizeof(ggml_fp16_t) * k, m, sizeof(ggml_fp16_t) * p->align, vk_transfer_queues[0], {}, {}));
+    seq.push_back(ggml_vk_buffer_write_2d_async_zeropad(&d_Y, 0, y, sizeof(ggml_fp16_t) * k, sizeof(ggml_fp16_t) * k, n, sizeof(ggml_fp16_t) * p->align, vk_transfer_queues[0], {}, {}));
+
+    ggml_vk_submit(vk_transfer_queues[0], seq, VK_NULL_HANDLE);
+
+    // Wait for transfers to finish
+    vk_transfer_queues[0].queue.waitIdle();
+
     auto begin = std::chrono::high_resolution_clock::now();

     for (size_t i = 0; i < num_it; i++) {
-        seq.push_back(ggml_vk_matmul(*p, d_X, d_Y, d_D, m, n, k, split_k, vk_compute_queue, {}, {}));
+        seq.push_back(ggml_vk_matmul(*p, d_X, d_Y, d_D, m, n, k, kpad, kpad, m, split_k, vk_compute_queue, {}, {}));
     }

     ggml_vk_submit(vk_compute_queue, seq, VK_NULL_HANDLE);
@@ -1936,4 +2074,70 @@ void ggml_vk_test_matmul_f16(size_t m, size_t n, size_t k, size_t num_it, int sp
     free(y);
     free(d);
 }

+void ggml_vk_test_buffer_write_zeropad(size_t m, size_t k, size_t align) {
+    std::vector<vk_sequence> seq;
+
+    const size_t kpad = ggml_vk_align_size(k, align);
+
+    vk_buffer d_X;
+    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * kpad * m, &d_X, 0);
+    vk_buffer d_X2;
+    ggml_vk_pool_malloc(sizeof(ggml_fp16_t) * k * m, &d_X2, 0);
+
+    ggml_fp16_t* x = (ggml_fp16_t *) ggml_vk_host_malloc(sizeof(ggml_fp16_t) * m * k);
+
+    for (size_t i = 0; i < m * k; i++) {
+        x[i] = ggml_fp32_to_fp16(rand() / (float)RAND_MAX);
+    }
+
+    seq.push_back(ggml_vk_buffer_write_2d_async_zeropad(&d_X, 0, x, sizeof(ggml_fp16_t) * k, sizeof(ggml_fp16_t) * k, m, sizeof(ggml_fp16_t) * align, vk_transfer_queues[0], {}, {}));
+
+    ggml_vk_submit(vk_transfer_queues[0], seq, VK_NULL_HANDLE);
+
+    ggml_vk_buffer_write(&d_X2, 0, x, sizeof(ggml_fp16_t) * k * m, vk_transfer_queues[0]);
+
+    vk_transfer_queues[0].queue.waitIdle();
+
+    ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(sizeof(ggml_fp16_t) * kpad * m);
+    ggml_fp16_t * x_chk2 = (ggml_fp16_t *) malloc(sizeof(ggml_fp16_t) * k * m);
+
+    ggml_vk_buffer_read(&d_X, 0, x_chk, sizeof(ggml_fp16_t) * kpad * m, vk_transfer_queues[0]);
+    ggml_vk_buffer_read(&d_X2, 0, x_chk2, sizeof(ggml_fp16_t) * k * m, vk_transfer_queues[0]);
+
+    double avg_err_async = 0.0;
+    double avg_err_sync = 0.0;
+
+    for (size_t kidx = 0; kidx < kpad; kidx++) {
+        for (size_t midx = 0; midx < m; midx++) {
+            if (kidx < k) {
+                const float err = std::fabs(ggml_fp16_to_fp32(x[midx * k + kidx]) - ggml_fp16_to_fp32(x_chk[midx * kpad + kidx]));
+                const float err2 = std::fabs(ggml_fp16_to_fp32(x[midx * k + kidx]) - ggml_fp16_to_fp32(x_chk2[midx * k + kidx]));
+                if (!std::isnan(err)) {
+                    avg_err_async += err;
+                }
+                if (!std::isnan(err2)) {
+                    avg_err_sync += err;
+                }
+
+                if (err > 0.01f) {
+                    std::cerr << "midx=" << midx << " kidx=" << kidx << " x: " << ggml_fp16_to_fp32(x[midx * k + kidx]) << " x_chk: " << ggml_fp16_to_fp32(x_chk[midx * kpad + kidx]) << " x_chk2: " << ggml_fp16_to_fp32(x_chk2[midx * k + kidx]) << std::endl;
+                }
+            } else {
+                const float val = std::fabs(ggml_fp16_to_fp32(x_chk[midx * kpad + kidx]));
+                if (val > 0.01f) {
+                    std::cerr << "ZEROPAD ERROR midx=" << midx << " kidx=" << kidx << " src0: 0.0 x_chkidx: " << val << std::endl;
+                    GGML_ASSERT(false);
+                }
+                avg_err_async += val;
+            }
+        }
+    }
+
+    std::cerr << "TEST BUFFER WRITE ZEROPAD m=" << m << " k=" << k << " align=" << align << " avg_err_async=" << avg_err_async / (kpad * m) << " avg_err_sync=" << avg_err_sync / (k * m) << std::endl;
+
+    free(x_chk);
+    ggml_vk_host_free(x);
+    ggml_vk_pool_free(d_X);
+}
 #endif
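The new ggml_vk_test_buffer_write_zeropad compares the zero-padded async upload against a plain ggml_vk_buffer_write of the same data and asserts that every element at or beyond k in a padded row reads back as zero. A hypothetical invocation, e.g. from a debug path (the sizes here are made up):

// Hypothetical debug call: 233 rows of 97 halves, padded to an alignment of 128 elements,
// so each row occupies kpad = 128 halves on the device and elements 97..127 must read back as zero.
ggml_vk_test_buffer_write_zeropad(233, 97, 128);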
@@ -21,25 +21,33 @@ layout (binding = 1) writeonly buffer D { float y[]; };

 layout (push_constant) uniform parameter
 {
-    int N;
+    int M;
+    int K;
+    int stride_a;
+    int stride_b;
 } p;

 void main() {
     const int i = int(gl_GlobalInvocationID.x);

-    if (i >= p.N) {
+    // Transposed
+    const int row = i % (p.K / QUANT_K);
+    const int col = i / (p.K / QUANT_K);
+
+    if (row * QUANT_K >= p.K || col >= p.M) {
         return;
     }

-    const block_q4_0 blk = x[i];
+    const int stride_a = p.stride_a / QUANT_K;
+
+    const block_q4_0 blk = x[col * stride_a + row];
     const float d = float(blk.d);

     [[unroll]] for (int j = 0; j < QUANT_K/2; ++j) {
         const int x0 = (blk.qs[j] & 0x0F) - 8;
         const int x1 = (blk.qs[j] >> 4) - 8;

-        y[i*QUANT_K + j + 0 ] = x0*d;
-        y[i*QUANT_K + j + QUANT_K/2] = x1*d;
+        y[col * p.stride_b + row*QUANT_K + j + 0 ] = x0*d;
+        y[col * p.stride_b + row*QUANT_K + j + QUANT_K/2] = x1*d;
     }
 }
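The q4_0 dequantization shader now walks the matrix in blocks: each invocation picks a block by row/col, reads it at col * stride_a + row in the quantized buffer, and writes QUANT_K floats starting at col * stride_b, so output rows can be padded independently of the input. The same indexing written as host code (a sketch; the block fields and QUANT_K = 32 follow ggml's q4_0 layout, with the fp16 scale shown as a float for brevity):

#include <cstdint>

#define QUANT_K 32                      // elements per q4_0 block

struct block_q4_0_ref {                 // host-side mirror of ggml's block_q4_0 (sketch)
    float   d;                          // scale (fp16 in ggml, float here for brevity)
    uint8_t qs[QUANT_K / 2];            // 4-bit quants, two per byte
};

// Dequantize an M x K matrix of q4_0 blocks into a float matrix whose rows are
// stride_b floats apart; stride_a is the source row stride in elements.
static void dequant_q4_0_ref(const block_q4_0_ref* x, float* y,
                             int M, int K, int stride_a, int stride_b) {
    const int blocks_per_row = K / QUANT_K;
    for (int col = 0; col < M; ++col) {              // col indexes matrix rows, as in the shader
        for (int row = 0; row < blocks_per_row; ++row) {
            const block_q4_0_ref blk = x[col * (stride_a / QUANT_K) + row];
            for (int j = 0; j < QUANT_K / 2; ++j) {
                const int x0 = (blk.qs[j] & 0x0F) - 8;
                const int x1 = (blk.qs[j] >> 4) - 8;
                y[col * stride_b + row * QUANT_K + j]               = x0 * blk.d;
                y[col * stride_b + row * QUANT_K + j + QUANT_K / 2] = x1 * blk.d;
            }
        }
    }
}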
@@ -9,13 +9,17 @@ layout (binding = 1) writeonly buffer D { float data_b[]; };

 layout (push_constant) uniform parameter
 {
-    int N;
+    int M;
+    int K;
+    int stride_a;
+    int stride_b;
 } p;

 void main() {
-    const int idx = int(gl_GlobalInvocationID.x);
+    const int row = int(gl_GlobalInvocationID.x % p.K);
+    const int col = int(gl_GlobalInvocationID.x / p.K);

-    if (idx < p.N) {
-        data_b[idx] = float(data_a[idx]);
+    if (row < p.M && col < p.K) {
+        data_b[col * p.stride_b + row] = float(data_a[col * p.stride_a + row]);
     }
 }
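The fp16-to-fp32 conversion shader gets the same treatment: the flat invocation index is split into a position along K and a position along M, and source and destination use independent row strides so the output rows can be padded. A host-side sketch of that strided copy (half_to_float stands in for the fp16 decoding, e.g. ggml_fp16_to_fp32 on the CPU):

#include <cstdint>

// Strided fp16 -> fp32 copy: M rows of K values, with independent row strides
// for the packed source and the padded destination. Sketch only.
static void convert_f16_to_f32_ref(const uint16_t* a, float* b,
                                   int M, int K, int stride_a, int stride_b,
                                   float (*half_to_float)(uint16_t)) {
    for (int col = 0; col < M; ++col) {
        for (int row = 0; row < K; ++row) {
            b[col * stride_b + row] = half_to_float(a[col * stride_a + row]);
        }
    }
}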
@@ -7,8 +7,8 @@

 layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

-layout (binding = 0) readonly buffer A { float16_t data_a[]; };
-layout (binding = 1) readonly buffer B { float16_t data_b[]; };
+layout (binding = 0) readonly buffer A { f16mat2x4 data_a[]; };
+layout (binding = 1) readonly buffer B { f16mat2x4 data_b[]; };
 layout (binding = 2) writeonly buffer D { float data_d[]; };

 layout (push_constant) uniform parameter
@@ -52,16 +52,16 @@ void main() {
     const int tiwr = tiw % (WSUBM / TM);
     const int tiwc = tiw / (WSUBM / TM);

-    const int loadr = int(gl_LocalInvocationID.x % BK);
-    const int loadc = int(gl_LocalInvocationID.x / BK);
+    const int loadr = int(gl_LocalInvocationID.x % (BK / 8));
+    const int loadc = int(gl_LocalInvocationID.x / (BK / 8));

-    const int loadstride = int(gl_WorkGroupSize.x);
+    const int loadstride = int(gl_WorkGroupSize.x * 8) / BK;

     const int start_k = ik * p.k_split;
     const int end_k = (ik + 1) * p.k_split;

-    int pos_a = ir * BM * p.stride_a + start_k;
-    int pos_b = ic * BN * p.stride_b + start_k;
+    int pos_a = ir * BM * p.stride_a / 8 + start_k / 8;
+    int pos_b = ic * BN * p.stride_b / 8 + start_k / 8;

     float sums[WMITER * TM * WNITER * TN];
     float16_t cache_a[WMITER * TM];
@@ -72,29 +72,33 @@ void main() {
     }

     [[unroll]] for (int block = start_k; block < end_k; block += BK) {
-        [[unroll]] for (int l = 0; l < BM * BK; l += loadstride) {
-            const int lr = l % BK;
-            const int lc = l / BK;
-            if (ir * BM + loadc + lc < p.M && block + loadr + lr < p.K) {
-                buf_a[(loadc + lc) * (BK+1) + loadr + lr] = data_a[pos_a + (loadc + lc) * p.stride_a + loadr + lr];
-            } else {
-                buf_a[(loadc + lc) * (BK+1) + loadr + lr] = 0.0hf;
-            }
+        [[unroll]] for (int l = 0; l < BM; l += loadstride) {
+            f16mat2x4 tmp = data_a[pos_a + (loadc + l) * p.stride_a / 8 + loadr];
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 0] = tmp[0].x;
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 1] = tmp[0].y;
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 2] = tmp[0].z;
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 3] = tmp[0].w;
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 4] = tmp[1].x;
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 5] = tmp[1].y;
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 6] = tmp[1].z;
+            buf_a[(loadc + l) * (BK+1) + loadr * 8 + 7] = tmp[1].w;
         }
-        [[unroll]] for (int l = 0; l < BN * BK; l += loadstride) {
-            const int lr = l % BK;
-            const int lc = l / BK;
-            if (ic * BN + loadc + lc < p.N && block + loadr + lr < p.K) {
-                buf_b[(loadc + lc) * (BK+1) + loadr + lr] = data_b[pos_b + (loadc + lc) * p.stride_b + loadr + lr];
-            } else {
-                buf_b[(loadc + lc) * (BK+1) + loadr + lr] = 0.0hf;
-            }
+        [[unroll]] for (int l = 0; l < BN; l += loadstride) {
+            f16mat2x4 tmp = data_b[pos_b + (loadc + l) * p.stride_b / 8 + loadr];
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 0] = tmp[0].x;
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 1] = tmp[0].y;
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 2] = tmp[0].z;
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 3] = tmp[0].w;
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 4] = tmp[1].x;
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 5] = tmp[1].y;
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 6] = tmp[1].z;
+            buf_b[(loadc + l) * (BK+1) + loadr * 8 + 7] = tmp[1].w;
         }

         barrier();

-        pos_a += BK;
-        pos_b += BK;
+        pos_a += BK / 8;
+        pos_b += BK / 8;

         for (int i = 0; i < min(BK, p.K - block); i++) {
             // Load from shared into cache
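The f16 matmul shader now fetches eight half-precision values per load (an f16mat2x4 holds 2x4 halves, 16 bytes) and scatters them into the shared tile, whose rows are BK+1 elements apart (the usual padding trick against shared-memory bank conflicts). The index arithmetic, where one vector index corresponds to eight scalar element indices, can be checked with a small host model:

#include <cassert>
#include <cstddef>

// Host check of the vectorized indexing used above: reading vector element
// (base/8 + r*stride/8 + loadr) and expanding its 8 lanes must touch the same
// scalar elements as base + r*stride + loadr*8 + 0..7.
static void check_vec8_indexing(size_t base, size_t r, size_t stride, size_t loadr) {
    assert(base % 8 == 0 && stride % 8 == 0);  // the padded stride is assumed to guarantee this
    const size_t vec_index = base / 8 + r * stride / 8 + loadr;
    for (size_t lane = 0; lane < 8; ++lane) {
        const size_t scalar_index = base + r * stride + loadr * 8 + lane;
        assert(vec_index * 8 + lane == scalar_index);
    }
}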
@@ -6,8 +6,8 @@

 layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

-layout (binding = 0) readonly buffer A { float data_a[]; };
-layout (binding = 1) readonly buffer B { float data_b[]; };
+layout (binding = 0) readonly buffer A { vec4 data_a[]; };
+layout (binding = 1) readonly buffer B { vec4 data_b[]; };
 layout (binding = 2) writeonly buffer D { float data_d[]; };

 layout (push_constant) uniform parameter
@@ -51,16 +51,16 @@ void main() {
     const int tiwr = tiw % (WSUBM / TM);
     const int tiwc = tiw / (WSUBM / TM);

-    const int loadr = int(gl_LocalInvocationID.x % BK);
-    const int loadc = int(gl_LocalInvocationID.x / BK);
+    const int loadr = int(gl_LocalInvocationID.x % (BK / 4));
+    const int loadc = int(gl_LocalInvocationID.x / (BK / 4));

-    const int loadstride = int(gl_WorkGroupSize.x);
+    const int loadstride = int(gl_WorkGroupSize.x * 4) / BK;

     const int start_k = ik * p.k_split;
     const int end_k = (ik + 1) * p.k_split;

-    int pos_a = ir * BM * p.stride_a + start_k;
-    int pos_b = ic * BN * p.stride_b + start_k;
+    int pos_a = ir * BM * p.stride_a / 4 + start_k / 4;
+    int pos_b = ic * BN * p.stride_b / 4 + start_k / 4;

     float sums[WMITER * TM * WNITER * TN];
     float cache_a[WMITER * TM];
@@ -71,29 +71,25 @@ void main() {
     }

     [[unroll]] for (int block = start_k; block < end_k; block += BK) {
-        [[unroll]] for (int l = 0; l < BM * BK; l += loadstride) {
-            const int lr = l % BK;
-            const int lc = l / BK;
-            if (ir * BM + loadc + lc < p.M && block + loadr + lr < p.K) {
-                buf_a[(loadc + lc) * (BK+1) + loadr + lr] = data_a[pos_a + (loadc + lc) * p.stride_a + loadr + lr];
-            } else {
-                buf_a[(loadc + lc) * (BK+1) + loadr + lr] = 0.0f;
-            }
+        [[unroll]] for (int l = 0; l < BM; l += loadstride) {
+            vec4 tmp = data_a[pos_a + (loadc + l) * p.stride_a / 4 + loadr];
+            buf_a[(loadc + l) * (BK+1) + loadr * 4 + 0] = tmp.x;
+            buf_a[(loadc + l) * (BK+1) + loadr * 4 + 1] = tmp.y;
+            buf_a[(loadc + l) * (BK+1) + loadr * 4 + 2] = tmp.z;
+            buf_a[(loadc + l) * (BK+1) + loadr * 4 + 3] = tmp.w;
         }
-        [[unroll]] for (int l = 0; l < BN * BK; l += loadstride) {
-            const int lr = l % BK;
-            const int lc = l / BK;
-            if (ic * BN + loadc + lc < p.N && block + loadr + lr < p.K) {
-                buf_b[(loadc + lc) * (BK+1) + loadr + lr] = data_b[pos_b + (loadc + lc) * p.stride_b + loadr + lr];
-            } else {
-                buf_b[(loadc + lc) * (BK+1) + loadr + lr] = 0.0f;
-            }
+        [[unroll]] for (int l = 0; l < BN; l += loadstride) {
+            vec4 tmp = data_b[pos_b + (loadc + l) * p.stride_b / 4 + loadr];
+            buf_b[(loadc + l) * (BK+1) + loadr * 4 + 0] = tmp.x;
+            buf_b[(loadc + l) * (BK+1) + loadr * 4 + 1] = tmp.y;
+            buf_b[(loadc + l) * (BK+1) + loadr * 4 + 2] = tmp.z;
+            buf_b[(loadc + l) * (BK+1) + loadr * 4 + 3] = tmp.w;
         }

         barrier();

-        pos_a += BK;
-        pos_b += BK;
+        pos_a += BK / 4;
+        pos_b += BK / 4;

         for (int i = 0; i < min(BK, p.K - block); i++) {
             // Load from shared into cache
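In both matmul shaders the old per-element bounds checks and the 0.0 fallback are gone; the commit appears to rely on the buffers being allocated at the padded size and filled through the zero-padding upload path, so that loads past the logical K edge stay inside the buffer and the padded region contributes only zeros to the dot products. A host-side check of that invariant (names hypothetical):

#include <cassert>
#include <vector>

// Verify the invariant the vectorized loaders rely on: every element past k
// in a padded row is exactly zero, so unguarded tile loads along K are harmless.
static void check_zero_padding(const std::vector<float>& buf, size_t rows, size_t k, size_t kpad) {
    for (size_t r = 0; r < rows; ++r) {
        for (size_t kk = k; kk < kpad; ++kk) {
            assert(buf[r * kpad + kk] == 0.0f);
        }
    }
}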