replace get_work_group_size() by local buf

This commit is contained in:
Neo Zhang 2024-07-04 11:55:23 +08:00
parent 2493479958
commit fdef7d606e
4 changed files with 13 additions and 21 deletions

View file

@@ -3,6 +3,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: MIT
+clear
 mkdir -p build
 cd build
 source /opt/intel/oneapi/setvars.sh

View file

@@ -1911,9 +1911,9 @@ static void soft_max_f32_submitter(const float * x, const float * mask, float *
 static void soft_max_f32_sycl(const float * x, const float * mask,
                               float * dst, const int ncols_x, const int nrows_x,
                               const int nrows_y, const float scale, const float max_bias,
-                              queue_ptr stream) {
+                              queue_ptr stream, int device_id) {
     int nth = WARP_SIZE;
-    int max_block_size = get_work_group_size(stream->get_device());
+    int max_block_size = ggml_sycl_info().work_group_size(device_id);
     while (nth < ncols_x && nth < max_block_size) nth *= 2;
     if (nth>max_block_size) nth = max_block_size;
@@ -2870,7 +2870,7 @@ inline void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, const ggml_te
     memcpy(&max_bias, dst->op_params + 1, sizeof(float));
     soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00,
-                      nrows_x, nrows_y, scale, max_bias, main_stream);
+                      nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device);
 }

 inline void ggml_sycl_op_scale(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,

View file

@@ -351,15 +351,6 @@ void ggml_backend_sycl_print_sycl_devices();
 static ggml_sycl_device_info ggml_sycl_init();
 ggml_sycl_device_info &ggml_sycl_info();
-// common host functions
-static inline int get_work_group_size(const sycl::device& device) {
-    dpct::device_info prop;
-    dpct::get_device_info(prop, device);
-    return prop.get_max_work_group_size();
-}
 // common device functions

 static __dpct_inline__ float warp_reduce_sum(float x,

View file

@@ -181,7 +181,7 @@ static void rms_norm_f32(const float* x, float* dst, const int ncols, const floa
 static void norm_f32_sycl(const float* x, float* dst, const int ncols,
                           const int nrows, const float eps,
-                          queue_ptr stream) {
+                          queue_ptr stream, int device_id) {
     GGML_ASSERT(ncols % WARP_SIZE == 0);
     if (ncols < 1024) {
         const sycl::range<3> block_dims(1, 1, WARP_SIZE);
@@ -197,7 +197,7 @@ static void norm_f32_sycl(const float* x, float* dst, const int ncols,
             });
     }
     else {
-        const int work_group_size = get_work_group_size(stream->get_device());
+        const int work_group_size = ggml_sycl_info().work_group_size(device_id);
         const sycl::range<3> block_dims(1, 1, work_group_size);
         /*
         DPCT1049:17: The work-group size passed to the SYCL kernel may exceed
@@ -222,7 +222,7 @@ static void norm_f32_sycl(const float* x, float* dst, const int ncols,
 static void group_norm_f32_sycl(const float* x, float* dst,
                                 const int num_groups, const int group_size,
-                                const int ne_elements, queue_ptr stream) {
+                                const int ne_elements, queue_ptr stream, int device_id) {
     static const float eps = 1e-6f;
     if (group_size < 1024) {
         const sycl::range<3> block_dims(1, 1, WARP_SIZE);
@@ -240,7 +240,7 @@ static void group_norm_f32_sycl(const float* x, float* dst,
             });
     }
     else {
-        const int work_group_size = get_work_group_size(stream->get_device());
+        const int work_group_size = ggml_sycl_info().work_group_size(device_id);
         const sycl::range<3> block_dims(1, 1, work_group_size);
         /*
         DPCT1049:18: The work-group size passed to the SYCL kernel may exceed
@@ -269,7 +269,7 @@ static void group_norm_f32_sycl(const float* x, float* dst,
 static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols,
                               const int nrows, const float eps,
-                              queue_ptr stream) {
+                              queue_ptr stream, int device_id) {
     GGML_ASSERT(ncols % WARP_SIZE == 0);
     // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE);
     if (ncols < 1024) {
@@ -286,7 +286,7 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols,
             });
     }
     else {
-        const int work_group_size = get_work_group_size(stream->get_device());
+        const int work_group_size = ggml_sycl_info().work_group_size(device_id);
         const sycl::range<3> block_dims(1, 1, work_group_size);
         /*
         DPCT1049:19: The work-group size passed to the SYCL kernel may exceed
@@ -322,7 +322,7 @@ void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
-    norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
+    norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream, ctx.device);

     (void)src1;
     (void)dst;
@@ -340,7 +340,7 @@ void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, const ggml_tensor*
     int num_groups = dst->op_params[0];
     int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
-    group_norm_f32_sycl(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream);
+    group_norm_f32_sycl(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream, ctx.device);

     (void)src1;
     (void)dst;
@@ -362,7 +362,7 @@ void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* sr
     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
-    rms_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
+    rms_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream, ctx.device);

     (void)src1;
     (void)dst;