diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 3da528468..3b5c1511e 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -65,88 +65,6 @@ __kernel void convert_fp16_to_fp32(__global half* x, __global float* y) {
 
     y[i] = vload_half(0, &x[i]);
 }
-
-__kernel void dequantize_row_q4_0(__global struct block_q4_0* x, __global float* y) {
-    const uint qk = 32;
-
-    const uint i = get_global_id(0) / qk;
-    const uint j = get_local_id(0);
-
-    const float d = x[i].d;
-
-    const int x0 = (x[i].qs[j] & 0xf) - 8;
-    const int x1 = (x[i].qs[j] >> 4) - 8;
-
-    y[i*qk + j + 0   ] = x0*d;
-    y[i*qk + j + qk/2] = x1*d;
-}
-
-__kernel void dequantize_row_q4_1(__global struct block_q4_1* x, __global float* y) {
-    const uint qk = 32;
-
-    const uint i = get_global_id(0) / qk;
-    const uint j = get_local_id(0);
-
-    const float d = x[i].d;
-    const float m = x[i].m;
-
-    const int x0 = (x[i].qs[j] & 0xf);
-    const int x1 = (x[i].qs[j] >> 4);
-
-    y[i*qk + j + 0   ] = x0*d + m;
-    y[i*qk + j + qk/2] = x1*d + m;
-}
-
-__kernel void dequantize_row_q5_0(__global struct block_q5_0* x, __global float* y) {
-    const uint qk = 32;
-
-    const uint i = get_global_id(0) / qk;
-    const uint j = get_local_id(0);
-
-    const float d = vload_half(0, (__global half*) &x[i].d);
-
-    uint32_t qh = x[i].qh;
-
-    const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
-    const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;
-
-    const int32_t x0 = ((x[i].qs[j] & 0xf) | xh_0) - 16;
-    const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
-
-    y[i*qk + j + 0   ] = x0*d;
-    y[i*qk + j + qk/2] = x1*d;
-}
-
-__kernel void dequantize_row_q5_1(__global struct block_q5_1* x, __global float* y) {
-    const uint qk = 32;
-
-    const uint i = get_global_id(0) / qk;
-    const uint j = get_local_id(0);
-
-    const float d = vload_half(0, (__global half*) &x[i].d);
-    const float m = vload_half(0, (__global half*) &x[i].m);
-
-    uint32_t qh = x[i].qh;
-
-    const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
-    const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;
-
-    const int x0 = (x[i].qs[j] & 0xf) | xh_0;
-    const int x1 = (x[i].qs[j] >> 4) | xh_1;
-
-    y[i*qk + j + 0   ] = x0*d + m;
-    y[i*qk + j + qk/2] = x1*d + m;
-}
-
-__kernel void dequantize_row_q8_0(__global struct block_q8_0* x, __global float* y) {
-    const uint qk = 32;
-    const uint i = get_global_id(0) / qk;
-    const uint j = get_local_id(0);
-
-    const float d = x[i].d;
-    y[i*qk + j] = x[i].qs[j]*d;
-}
-
 void dequantize_q4_0(__global const struct block_q4_0* x, const int ib, const int iqs, float* v0, float* v1) {
     const float d = x[ib].d;
 
@@ -209,8 +127,32 @@ void dequantize_q8_0(__global const struct block_q8_0* x, const int ib, const in
     *v1 = vi1*d;
 }
 void convert_f16(__global half* x, const int ib, const int iqs, float* v0, float* v1){
-    *v0 = vload_half(0, (__global half*) &x[ib + 0]);
-    *v1 = vload_half(0, (__global half*) &x[ib + 1]);
+    *v0 = vload_half(0, &x[ib + 0]);
+    *v1 = vload_half(0, &x[ib + 1]);
+}
+);
+
+std::string dequant_template = MULTILINE_QUOTE(
+__kernel void KERNEL_NAME(__global X_TYPE* x, __global float* y) {
+    const int i = get_group_id(0)*get_local_size(0) + get_local_id(0)*2;
+
+    if (i >= get_global_size(0)) {
+        return;
+    }
+
+    const uint qk = QUANT_K;
+    const uint qr = QUANT_R;
+
+    const int ib = i/qk; // block index
+    const int iqs = (i%qk)/qr; // quant index
+    const int iybs = i - i%qk; // y block start index
+    const int y_offset = qr == 1 ? 1 : qk/2;
+
+    // dequantize
+    float v0, v1;
+    DEQUANT_FUNC(x, ib, iqs, &v0, &v1);
+    y[iybs + iqs + 0] = v0;
+    y[iybs + iqs + y_offset] = v1;
 }
 );
 
@@ -256,17 +198,26 @@ __kernel void KERNEL_NAME(__global X_TYPE* x, __local float* tmp, __global float
 }
 );
 
-std::array<std::string, 5> dequant_mul_mat_vec_str_keys = {
+std::array<std::string, 5> dequant_str_keys = {
     "KERNEL_NAME", "X_TYPE", "QUANT_K", "QUANT_R", "DEQUANT_FUNC"
 };
 
+std::array<std::string, 30> dequant_str_values = {
+    "dequantize_row_q4_0", "struct block_q4_0", "32", "2", "dequantize_q4_0",
+    "dequantize_row_q4_1", "struct block_q4_1", "32", "2", "dequantize_q4_1",
+    "dequantize_row_q5_0", "struct block_q5_0", "32", "2", "dequantize_q5_0",
+    "dequantize_row_q5_1", "struct block_q5_1", "32", "2", "dequantize_q5_1",
+    "dequantize_row_q8_0", "struct block_q8_0", "32", "1", "dequantize_q8_0",
+    "convert_row_f16", "half", "1", "1", "convert_f16"
+};
+
 std::array<std::string, 30> dequant_mul_mat_vec_str_values = {
     "dequantize_mul_mat_vec_q4_0", "struct block_q4_0", "32", "2", "dequantize_q4_0",
     "dequantize_mul_mat_vec_q4_1", "struct block_q4_1", "32", "2", "dequantize_q4_1",
     "dequantize_mul_mat_vec_q5_0", "struct block_q5_0", "32", "2", "dequantize_q5_0",
     "dequantize_mul_mat_vec_q5_1", "struct block_q5_1", "32", "2", "dequantize_q5_1",
     "dequantize_mul_mat_vec_q8_0", "struct block_q8_0", "32", "1", "dequantize_q8_0",
-    "convert_mul_mat_vec_f16", "half", "32", "1", "convert_f16"
+    "convert_mul_mat_vec_f16", "half", "1", "1", "convert_f16"
 };
 
 static std::string& sreplace(std::string& s, const std::string& from, const std::string& to) {
@@ -281,12 +232,15 @@ static std::string& sreplace(std::string& s, const std::string& from, const std:
 static std::string generate_kernels() {
     std::stringstream src;
     src << program_source << '\n';
-    for (size_t i = 0; i < dequant_mul_mat_vec_str_values.size(); i += dequant_mul_mat_vec_str_keys.size()) {
-        std::string kernel = dequant_mul_mat_vec_template;
-        for (size_t j = 0; j < dequant_mul_mat_vec_str_keys.size(); j++) {
-            sreplace(kernel, dequant_mul_mat_vec_str_keys[j], dequant_mul_mat_vec_str_values[i + j]);
+    for (size_t i = 0; i < dequant_str_values.size(); i += dequant_str_keys.size()) {
+        std::string dequant_kernel = dequant_template;
+        std::string dmmv_kernel = dequant_mul_mat_vec_template;
+        for (size_t j = 0; j < dequant_str_keys.size(); j++) {
+            sreplace(dequant_kernel, dequant_str_keys[j], dequant_str_values[i + j]);
+            sreplace(dmmv_kernel, dequant_str_keys[j], dequant_mul_mat_vec_str_values[i + j]);
         }
-        src << kernel << '\n';
+        src << dequant_kernel << '\n';
+        src << dmmv_kernel << '\n';
     }
     return src.str();
 }
@@ -308,7 +262,7 @@ static cl_command_queue queue;
 static cl_program program;
 static cl_mem cl_buffer_a, cl_buffer_qb, cl_buffer_b, cl_buffer_c;
 static size_t cl_size_a = 0, cl_size_qb = 0, cl_size_b = 0, cl_size_c = 0;
-static cl_kernel convert_fp16_to_fp32_cl;
+static cl_kernel convert_row_f16_cl;
 static cl_kernel dequantize_row_q4_0_cl, dequantize_row_q4_1_cl, dequantize_row_q5_0_cl, dequantize_row_q5_1_cl, dequantize_row_q8_0_cl;
 static cl_kernel dequantize_mul_mat_vec_q4_0_cl, dequantize_mul_mat_vec_q4_1_cl, dequantize_mul_mat_vec_q5_0_cl, dequantize_mul_mat_vec_q5_1_cl, dequantize_mul_mat_vec_q8_0_cl, convert_mul_mat_vec_f16_cl;
 static bool fp16_support = false;
@@ -392,7 +346,7 @@ void ggml_cl_init(void) {
     program = build_program_from_source(context, device, kernel_src.c_str());
 
     // FP16 to FP32 kernel
-    convert_fp16_to_fp32_cl = clCreateKernel(program, "convert_fp16_to_fp32", &err);
+    convert_row_f16_cl = clCreateKernel(program, "convert_row_f16", &err);
     CL_CHECK(err, "clCreateKernel");
 
     // Dequantize kernels
@@ -450,7 +404,7 @@ static cl_kernel* ggml_get_to_fp32_cl(ggml_type type) {
         case GGML_TYPE_Q8_0:
             return &dequantize_row_q8_0_cl;
         case GGML_TYPE_F16:
-            return &convert_fp16_to_fp32_cl;
+            return &convert_row_f16_cl;
         default:
             return nullptr;
     }
@@ -790,7 +744,6 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL), "ggml_cl_h2d_tensor_2d");
 
             // compute
-            // dequantize_mul_mat_vec(__global void * vx, __local float* tmp, __global float * y, __global float * dst, __global int ncols, __global int vx_type) {
             const size_t global = ne01 * CL_DMMV_BLOCK_SIZE;
             const size_t local = CL_DMMV_BLOCK_SIZE;
             const cl_int ncols = ne00;
@@ -804,11 +757,10 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
         } else { // general dequantization kernel + CLBlast matrix matrix multiplication
             // convert src0 to fp32 on device
             const size_t global = x_ne;
-            const size_t local = ggml_blck_size(type) / 2;
             CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q), "clSetKernelArg");
             CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X), "clSetKernelArg");
             CL_CHECK(clFinish(queue), "clFinish");
-            CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, NULL, &global, type == GGML_TYPE_F16 ? NULL : &local, 0, NULL, NULL), "clEnqueueNDRangeKernel");
+            CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, NULL, &global, NULL, 0, NULL, NULL), "clEnqueueNDRangeKernel");
 
             // copy src1 to device
             CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL), "ggml_cl_h2d_tensor_2d");
@@ -1050,4 +1002,4 @@ void ggml_cl_sgemm_wrapper(
     clWaitForEvents(1, &ev_c);
     clReleaseEvent(ev_sgemm);
     clReleaseEvent(ev_c);
-}
\ No newline at end of file
+}
diff --git a/model_adapter.cpp b/model_adapter.cpp
index d337b666b..2c65b3a4b 100644
--- a/model_adapter.cpp
+++ b/model_adapter.cpp
@@ -203,7 +203,7 @@ void print_tok_vec(std::vector &embd)
         fin.read((char *)&temp, sizeof(temp));//rot
         fin.read((char *)&ftype, sizeof(ftype));//filetype
 
-        if(ver==1 || ftype==7) //q8 formats treat as old one
+        if(ver==1)
         {
             fileformat = FileFormat::GGJT;
         }