Continue implementation

0cc4m 2023-06-11 08:49:43 +02:00
parent 4a96d0eb7f
commit 88d4ec05a8


@@ -3,23 +3,20 @@
 #include <vulkan/vulkan.hpp>
 #include "external/vk_mem_alloc.h"
 
-#include <iostream>
+#include <atomic>
 #include <fstream>
+#include <iostream>
+#include <limits>
 
 #include "ggml.h"
 
-// static cl_platform_id platform;
-// static cl_device_id device;
-// static cl_context context;
-// static cl_command_queue queue;
-// static cl_program program;
-// static cl_kernel kernel_q4_0, kernel_q4_1, kernel_q4_2, kernel_q5_0, kernel_q5_1, kernel_q8_0;
-// static cl_mem cl_buffer_a, cl_buffer_qb, cl_buffer_b, cl_buffer_c;
-// static size_t cl_size_a = 0, cl_size_qb = 0, cl_size_b = 0, cl_size_c = 0;
-
-vk::Instance instance;
-vk::PhysicalDevice physical_device;
+#define VK_API_VERSION VK_API_VERSION_1_2
+
+vk::Instance vk_instance;
+uint32_t vk_compute_queue_family_index;
+vk::PhysicalDevice vk_physical_device;
 vk::Device vk_device;
+vk::DescriptorSetLayout vk_pipeline_matmul_dsl;
 vk::Pipeline vk_pipeline_matmul;
 VmaAllocation vk_buffer_qa_alloc, vk_buffer_a_alloc, vk_buffer_b_alloc, vk_buffer_c_alloc;
 vk::Buffer vk_buffer_qa, vk_buffer_a, vk_buffer_b, vk_buffer_c;
@@ -27,29 +24,27 @@ vk::Buffer vk_buffer_qa, vk_buffer_a, vk_buffer_b, vk_buffer_c;
 void ggml_vk_init(void) {
     char* GGML_VULKAN_DEVICE = getenv("GGML_VULKAN_DEVICE");
     int dev_num = (GGML_VULKAN_DEVICE == NULL ? 0 : atoi(GGML_VULKAN_DEVICE));
-    printf("\nInitializing Vulkan...");
-    printf("\nAttempting to use: Device=%d\n", dev_num);
 
-    vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, VK_API_VERSION_1_2 };
+    vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, VK_API_VERSION };
     const std::vector<const char*> layers = { "VK_LAYER_KHRONOS_validation" };
     vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags(), &app_info, layers.size(), layers.data());
-    instance = vk::createInstance(instance_create_info);
+    vk_instance = vk::createInstance(instance_create_info);
 
-    physical_device = instance.enumeratePhysicalDevices()[dev_num];
-    vk::PhysicalDeviceProperties device_props = physical_device.getProperties();
-    std::cout << "Picked: " << device_props.deviceName << std::endl;
+    vk_physical_device = vk_instance.enumeratePhysicalDevices()[dev_num];
+    vk::PhysicalDeviceProperties device_props = vk_physical_device.getProperties();
+    std::cout << "ggml_vulkan: Using " << device_props.deviceName << std::endl;
 
-    std::vector<vk::QueueFamilyProperties> queue_family_props = physical_device.getQueueFamilyProperties();
+    std::vector<vk::QueueFamilyProperties> queue_family_props = vk_physical_device.getQueueFamilyProperties();
     auto prop_it = std::find_if(queue_family_props.begin(), queue_family_props.end(), [](const vk::QueueFamilyProperties& prop)
     {
         return prop.queueFlags & vk::QueueFlagBits::eCompute;
     });
-    const uint32_t compute_queue_family_index = std::distance(queue_family_props.begin(), prop_it);
+    vk_compute_queue_family_index = std::distance(queue_family_props.begin(), prop_it);
 
     const float queue_priority = 1.0f;
-    vk::DeviceQueueCreateInfo device_queue_create_info(vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, &queue_priority);
+    vk::DeviceQueueCreateInfo device_queue_create_info(vk::DeviceQueueCreateFlags(), vk_compute_queue_family_index, 1, &queue_priority);
     vk::DeviceCreateInfo device_create_info(vk::DeviceCreateFlags(), device_queue_create_info);
-    vk_device = physical_device.createDevice(device_create_info);
+    vk_device = vk_physical_device.createDevice(device_create_info);
 
     std::vector<char> matmul_shader_contents;
     if (std::ifstream shader_file{ "ggml-vulkan-matmul.spv", std::ios::binary | std::ios::ate }) {
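
Note: device selection now comes from the GGML_VULKAN_DEVICE environment variable (an index into enumeratePhysicalDevices(), defaulting to 0), and the first queue family advertising compute support is used; prop_it is not checked against queue_family_props.end(), so a device with no compute queue would silently produce an out-of-range family index. A minimal standalone sketch (hypothetical helper, not part of this commit) for finding which index names which GPU:

    #include <vulkan/vulkan.hpp>
    #include <iostream>

    int main() {
        // Same enumeration order as ggml_vk_init above, so the printed index
        // is the value to pass via GGML_VULKAN_DEVICE.
        vk::ApplicationInfo app_info{ "vk-list-devices", 1, nullptr, 0, VK_API_VERSION_1_2 };
        vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags(), &app_info);
        vk::Instance instance = vk::createInstance(instance_create_info);
        std::vector<vk::PhysicalDevice> devices = instance.enumeratePhysicalDevices();
        for (size_t i = 0; i < devices.size(); i++) {
            std::cout << i << ": " << devices[i].getProperties().deviceName << std::endl;
        }
        instance.destroy();
        return 0;
    }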
@@ -74,9 +69,9 @@ void ggml_vk_init(void) {
     vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
         vk::DescriptorSetLayoutCreateFlags(),
         descriptor_set_layout_binding);
-    vk::DescriptorSetLayout descriptor_set_layout = vk_device.createDescriptorSetLayout(descriptor_set_layout_create_info);
+    vk_pipeline_matmul_dsl = vk_device.createDescriptorSetLayout(descriptor_set_layout_create_info);
 
-    vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), descriptor_set_layout);
+    vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), vk_pipeline_matmul_dsl);
     vk::PipelineLayout pipeline_layout = vk_device.createPipelineLayout(pipeline_layout_create_info);
 
     vk::PipelineCache pipeline_cache = vk_device.createPipelineCache(vk::PipelineCacheCreateInfo());
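
Note: the descriptor set layout is now kept in the global vk_pipeline_matmul_dsl instead of a function-local, presumably so later dispatch code can allocate descriptor sets against it. A sketch of that assumed follow-up use (not part of this commit; pool sizes are illustrative):

    // Allocate one descriptor set for the matmul pipeline from the saved layout.
    vk::DescriptorPoolSize pool_size(vk::DescriptorType::eStorageBuffer, 3);
    vk::DescriptorPoolCreateInfo pool_create_info(vk::DescriptorPoolCreateFlags(), 1, 1, &pool_size);
    vk::DescriptorPool descriptor_pool = vk_device.createDescriptorPool(pool_create_info);

    vk::DescriptorSetAllocateInfo alloc_info(descriptor_pool, 1, &vk_pipeline_matmul_dsl);
    vk::DescriptorSet descriptor_set = vk_device.allocateDescriptorSets(alloc_info).front();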
@@ -92,151 +87,112 @@ void ggml_vk_init(void) {
     vk_pipeline_matmul = vk_device.createComputePipeline(pipeline_cache, compute_pipeline_create_info).value;
 }
 
-// static void ggml_cl_malloc(size_t req_size, size_t* cur_size, cl_mem_flags flags, cl_mem* buf) {
-//     if (req_size <= *cur_size) {
-//         return;
-//     }
-//
-//     // Reallocate buffer with enough space
-//     if (*cur_size > 0) {
-//         clReleaseMemObject(*buf);
-//     }
-//     cl_int err;
-//     *buf = clCreateBuffer(context, flags, req_size, NULL, &err);
-//     *cur_size = req_size;
-//     CL_CHECK(err, "clCreateBuffer");
-// }
-//
-// void ggml_cl_sgemm_wrapper(
-//         const enum ggml_blas_order order, const enum ggml_blas_op trans_a, const enum ggml_blas_op trans_b,
-//         const int m, const int n, const int k,
-//         const float alpha, const void *host_a, const int lda,
-//         const float *host_b, const int ldb, const float beta,
-//         float *host_c, const int ldc, const int btype) {
-//     cl_int err = 0;
-//
-//     cl_kernel kernel;
-//     size_t global = n * k, local, size_qb;
-//     bool dequant;
-//     cl_block_q5_0* cl_host_b;
-//
-//     switch (btype) {
-//     case GGML_TYPE_F32:
-//         dequant = false;
-//         break;
-//     case GGML_TYPE_Q4_0:
-//         dequant = true;
-//         kernel = kernel_q4_0;
-//         local = 16;
-//         size_qb = global * (sizeof(float) + local) / 32;
-//         break;
-//     case GGML_TYPE_Q4_1:
-//         dequant = true;
-//         kernel = kernel_q4_1;
-//         local = 16;
-//         size_qb = global * (sizeof(float) * 2 + local) / 32;
-//         break;
-//     case GGML_TYPE_Q4_2:
-//         dequant = true;
-//         kernel = kernel_q4_2;
-//         local = 8;
-//         size_qb = global * (sizeof(ggml_fp16_t) + local) / 16;
-//         break;
-//     case GGML_TYPE_Q5_0:
-//         dequant = true;
-//         kernel = kernel_q5_0;
-//         local = 16;
-//         // For some reason OpenCL seems to be incapable of working with structs of size 22.
-//         // 20 and 24 bytes are fine. Workaround to do the fp16 to fp32 step on CPU...
-//         // TODO Find the reason, fix and remove workaround.
-//         const block_q5_0* b = (const block_q5_0*) host_b;
-//         cl_host_b = (cl_block_q5_0*) malloc(sizeof(cl_block_q5_0) * global / 32);
-//         for (size_t i = 0; i < global / 32; i++) {
-//             cl_host_b[i].d = ggml_fp16_to_fp32(b[i].d);
-//             memcpy(&cl_host_b[i].qh, b[i].qh, sizeof(uint32_t));
-//             memcpy(&cl_host_b[i].qs, b[i].qs, QK5_0 / 2);
-//         }
-//         host_b = (const float*) cl_host_b;
-//         size_qb = global * (sizeof(float) + sizeof(uint32_t) + local) / 32;
-//         break;
-//     case GGML_TYPE_Q5_1:
-//         dequant = true;
-//         kernel = kernel_q5_1;
-//         local = 16;
-//         size_qb = global * (sizeof(ggml_fp16_t) * 2 + sizeof(uint32_t) + local) / 32;
-//         break;
-//     case GGML_TYPE_Q8_0:
-//         dequant = true;
-//         kernel = kernel_q8_0;
-//         local = 32;
-//         size_qb = global * (sizeof(float) + local) / 32;
-//         break;
-//     default:
-//         fprintf(stderr, "Error: Unsupported OpenCL btype %d\n", btype);
-//         abort();
-//     }
-//
-//     const size_t size_a = m * k * sizeof(float);
-//     const size_t size_b = n * k * sizeof(float);
-//     const size_t size_c = m * n * sizeof(float);
-//
-//     // Prepare buffers
-//     ggml_cl_malloc(size_a, &cl_size_a, CL_MEM_READ_ONLY, &cl_buffer_a);
-//     if (dequant) {
-//         ggml_cl_malloc(size_qb, &cl_size_qb, CL_MEM_READ_ONLY, &cl_buffer_qb);
-//     }
-//     ggml_cl_malloc(size_b, &cl_size_b, CL_MEM_READ_WRITE, &cl_buffer_b);
-//     ggml_cl_malloc(size_c, &cl_size_c, CL_MEM_WRITE_ONLY, &cl_buffer_c);
-//
-//     cl_event ev_a, ev_qb, ev_b;
-//
-//     if (dequant) {
-//         err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &cl_buffer_qb);
-//         err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &cl_buffer_b);
-//         CL_CHECK(err, "clSetKernelArg");
-//         err = clEnqueueWriteBuffer(queue, cl_buffer_qb, CL_FALSE, 0, size_qb, host_b, 0, NULL, &ev_qb);
-//         CL_CHECK(err, "clEnqueueWriteBuffer qb");
-//     } else {
-//         err = clEnqueueWriteBuffer(queue, cl_buffer_b, CL_FALSE, 0, size_b, host_b, 0, NULL, &ev_b);
-//         CL_CHECK(err, "clEnqueueWriteBuffer b");
-//     }
-//
-//     err = clEnqueueWriteBuffer(queue, cl_buffer_a, CL_FALSE, 0, size_a, host_a, 0, NULL, &ev_a);
-//     CL_CHECK(err, "clEnqueueWriteBuffer a");
-//     if (dequant) {
-//         err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 1, &ev_qb, &ev_b);
-//         CL_CHECK(err, "clEnqueueNDRangeKernel");
-//         clReleaseEvent(ev_qb);
-//     }
-//     clWaitForEvents(1, &ev_a);
-//     clWaitForEvents(1, &ev_b);
-//     clReleaseEvent(ev_a);
-//     clReleaseEvent(ev_b);
-//
-//     cl_event ev_sgemm;
-//     CLBlastStatusCode status = CLBlastSgemm((CLBlastLayout)order,
-//                                             (CLBlastTranspose)trans_a, (CLBlastTranspose)trans_b,
-//                                             m, n, k,
-//                                             alpha,
-//                                             cl_buffer_a, 0, lda,
-//                                             cl_buffer_b, 0, ldb,
-//                                             beta,
-//                                             cl_buffer_c, 0, ldc,
-//                                             &queue, &ev_sgemm);
-//
-//     if (status != CLBlastSuccess) {
-//         fprintf(stderr, "Error: CLBlast SGEMM %d\n", status);
-//         abort();
-//     }
-//
-//     cl_event ev_c;
-//     clEnqueueReadBuffer(queue, cl_buffer_c, CL_TRUE, 0, size_c, host_c, 1, &ev_sgemm, &ev_c);
-//
-//     // Wait for completion
-//     clWaitForEvents(1, &ev_c);
-//     clReleaseEvent(ev_sgemm);
-//     clReleaseEvent(ev_c);
-//     if (btype == GGML_TYPE_Q5_0) {
-//         free((void*) cl_host_b);
-//     }
-// }
+// buffer pool for vulkan
+#define MAX_VK_BUFFERS 256
+
+struct scoped_spin_lock {
+    std::atomic_flag& lock;
+    scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
+        while (lock.test_and_set(std::memory_order_acquire)) {
+            ; // spin
+        }
+    }
+    ~scoped_spin_lock() {
+        lock.clear(std::memory_order_release);
+    }
+    scoped_spin_lock(const scoped_spin_lock&) = delete;
+    scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
+};
+
+struct vk_buffer {
+    vk::Buffer buffer;
+    size_t size = 0;
+};
+
+static vk_buffer g_vk_buffer_pool[MAX_VK_BUFFERS];
+static std::atomic_flag g_vk_pool_lock = ATOMIC_FLAG_INIT;
+
+static vk::Buffer ggml_vk_pool_malloc(size_t size, size_t * actual_size) {
+    scoped_spin_lock lock(g_vk_pool_lock);
+
+    int best_i = -1;
+    size_t best_size = std::numeric_limits<size_t>::max(); //smallest unused buffer that fits our needs
+    int worst_i = -1;
+    size_t worst_size = 0; //largest unused buffer seen so far
+    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
+        vk_buffer &b = g_vk_buffer_pool[i];
+        if (b.size > 0 && b.size >= size && b.size < best_size)
+        {
+            best_i = i;
+            best_size = b.size;
+        }
+        if (b.size > 0 && b.size > worst_size)
+        {
+            worst_i = i;
+            worst_size = b.size;
+        }
+    }
+    if(best_i!=-1) //found the smallest buffer that fits our needs
+    {
+        vk_buffer& b = g_vk_buffer_pool[best_i];
+        vk::Buffer buffer = b.buffer;
+        *actual_size = b.size;
+        b.size = 0;
+        return buffer;
+    }
+    if(worst_i!=-1) //no buffer that fits our needs, resize largest one to save memory
+    {
+        vk_buffer& b = g_vk_buffer_pool[worst_i];
+        vk::Buffer buffer = b.buffer;
+        b.size = 0;
+        // vkReleaseMemObject(buffer);
+    }
+    vk::Buffer buffer;
+
+    vk::BufferCreateInfo buffer_create_info{
+        vk::BufferCreateFlags(),
+        size,
+        vk::BufferUsageFlagBits::eStorageBuffer,
+        vk::SharingMode::eExclusive,
+        1,
+        &vk_compute_queue_family_index
+    };
+
+    VmaAllocatorCreateInfo allocator_info = {};
+    allocator_info.vulkanApiVersion = VK_API_VERSION;
+    allocator_info.physicalDevice = vk_physical_device;
+    allocator_info.device = vk_device;
+    allocator_info.instance = vk_instance;
+
+    VmaAllocator allocator;
+    vmaCreateAllocator(&allocator_info, &allocator);
+
+    VmaAllocationCreateInfo allocation_info = {};
+    allocation_info.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
+
+    VmaAllocation buffer_allocation;
+    vmaCreateBuffer(allocator,
+                    &static_cast<VkBufferCreateInfo>(buffer_create_info),
+                    &allocation_info,
+                    &static_cast<VkBuffer>(buffer),
+                    &buffer_allocation,
+                    nullptr);
+
+    *actual_size = size;
+    return buffer;
+}
+
+static void ggml_vk_pool_free(vk::Buffer buffer, size_t size) {
+    scoped_spin_lock lock(g_vk_pool_lock);
+
+    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
+        vk_buffer& b = g_vk_buffer_pool[i];
+        if (b.size == 0) {
+            b.buffer = buffer;
+            b.size = size;
+            return;
+        }
+    }
+    fprintf(stderr, "WARNING: vk buffer pool full, increase MAX_VK_BUFFERS\n");
+    vkReleaseMemObject(mem);
+}
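
Note: two loose ends in this revision of the pool code. First, vkReleaseMemObject is not a Vulkan function (its OpenCL ancestor was clReleaseMemObject) and mem is not declared, so the overflow fallback in ggml_vk_pool_free cannot work as written; the same placeholder is commented out in the worst_i branch of ggml_vk_pool_malloc, which therefore drops the evicted vk::Buffer without freeing it. Second, ggml_vk_pool_malloc creates a fresh VmaAllocator on every call and discards buffer_allocation, so the buffer's backing memory can never be returned. A sketch of one possible direction (assumed names, not this commit's code): share a single allocator and keep each buffer's VmaAllocation in its pool entry so eviction can destroy it.

    // Hypothetical: g_vk_allocator created once in ggml_vk_init via vmaCreateAllocator.
    static VmaAllocator g_vk_allocator;

    struct vk_pool_buffer {
        vk::Buffer buffer;
        VmaAllocation allocation = nullptr;
        size_t size = 0;
    };

    static void ggml_vk_buffer_destroy(vk_pool_buffer & b) {
        // vmaDestroyBuffer releases both the VkBuffer and its backing memory.
        vmaDestroyBuffer(g_vk_allocator, static_cast<VkBuffer>(b.buffer), b.allocation);
        b.buffer = nullptr;
        b.allocation = nullptr;
        b.size = 0;
    }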