diff --git a/examples/sycl/build.sh b/examples/sycl/build.sh
index 8fe0a6790..32ad97da3 100755
--- a/examples/sycl/build.sh
+++ b/examples/sycl/build.sh
@@ -3,6 +3,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: MIT
 
+clear
 mkdir -p build
 cd build
 source /opt/intel/oneapi/setvars.sh
diff --git a/ggml/src/ggml-sycl.cpp b/ggml/src/ggml-sycl.cpp
index cfa8ecd76..5d2cfdb71 100644
--- a/ggml/src/ggml-sycl.cpp
+++ b/ggml/src/ggml-sycl.cpp
@@ -1911,9 +1911,9 @@ static void soft_max_f32_submitter(const float * x, const float * mask, float *
 static void soft_max_f32_sycl(const float * x, const float * mask,
                               float * dst, const int ncols_x, const int nrows_x,
                               const int nrows_y, const float scale, const float max_bias,
-                              queue_ptr stream) {
+                              queue_ptr stream, int device_id) {
     int nth = WARP_SIZE;
-    int max_block_size = get_work_group_size(stream->get_device());
+    int max_block_size = ggml_sycl_info().work_group_size(device_id);
     while (nth < ncols_x && nth < max_block_size) nth *= 2;
     if (nth>max_block_size) nth = max_block_size;
     const sycl::range<3> block_dims(1, 1, nth);
@@ -2870,7 +2870,7 @@ inline void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, const ggml_te
     memcpy(&max_bias, dst->op_params + 1, sizeof(float));
 
     soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00,
-                      nrows_x, nrows_y, scale, max_bias, main_stream);
+                      nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device);
 }
 
 inline void ggml_sycl_op_scale(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp
index 7e25bcb78..5a2f1664d 100644
--- a/ggml/src/ggml-sycl/common.hpp
+++ b/ggml/src/ggml-sycl/common.hpp
@@ -351,15 +351,6 @@ void ggml_backend_sycl_print_sycl_devices();
 static ggml_sycl_device_info ggml_sycl_init();
 ggml_sycl_device_info &ggml_sycl_info();
 
-// common host functions
-
-static inline int get_work_group_size(const sycl::device& device) {
-    dpct::device_info prop;
-    dpct::get_device_info(prop, device);
-    return prop.get_max_work_group_size();
-}
-
-
 // common device functions
 
 static __dpct_inline__ float warp_reduce_sum(float x,
diff --git a/ggml/src/ggml-sycl/norm.cpp b/ggml/src/ggml-sycl/norm.cpp
index a77f7852c..3ad1131d6 100644
--- a/ggml/src/ggml-sycl/norm.cpp
+++ b/ggml/src/ggml-sycl/norm.cpp
@@ -181,7 +181,7 @@ static void rms_norm_f32(const float* x, float* dst, const int ncols, const floa
 
 static void norm_f32_sycl(const float* x, float* dst, const int ncols,
                           const int nrows, const float eps,
-                          queue_ptr stream) {
+                          queue_ptr stream, int device_id) {
     GGML_ASSERT(ncols % WARP_SIZE == 0);
     if (ncols < 1024) {
         const sycl::range<3> block_dims(1, 1, WARP_SIZE);
@@ -197,7 +197,7 @@ static void norm_f32_sycl(const float* x, float* dst, const int ncols,
         });
     }
     else {
-        const int work_group_size = get_work_group_size(stream->get_device());
+        const int work_group_size = ggml_sycl_info().work_group_size(device_id);
         const sycl::range<3> block_dims(1, 1, work_group_size);
         /*
         DPCT1049:17: The work-group size passed to the SYCL kernel may exceed
@@ -222,7 +222,7 @@ static void norm_f32_sycl(const float* x, float* dst, const int ncols,
 
 static void group_norm_f32_sycl(const float* x, float* dst,
                                 const int num_groups, const int group_size,
-                                const int ne_elements, queue_ptr stream) {
+                                const int ne_elements, queue_ptr stream, int device_id) {
     static const float eps = 1e-6f;
     if (group_size < 1024) {
         const sycl::range<3> block_dims(1, 1, WARP_SIZE);
@@ -240,7 +240,7 @@ static void group_norm_f32_sycl(const float* x, float* dst,
         });
     }
     else {
-        const int work_group_size = get_work_group_size(stream->get_device());
+        const int work_group_size = ggml_sycl_info().work_group_size(device_id);
         const sycl::range<3> block_dims(1, 1, work_group_size);
         /*
         DPCT1049:18: The work-group size passed to the SYCL kernel may exceed
@@ -269,7 +269,7 @@ static void group_norm_f32_sycl(const float* x, float* dst,
 
 static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols,
                               const int nrows, const float eps,
-                              queue_ptr stream) {
+                              queue_ptr stream, int device_id) {
     GGML_ASSERT(ncols % WARP_SIZE == 0);
     // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE);
     if (ncols < 1024) {
@@ -286,7 +286,7 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols,
         });
     }
     else {
-        const int work_group_size = get_work_group_size(stream->get_device());
+        const int work_group_size = ggml_sycl_info().work_group_size(device_id);
         const sycl::range<3> block_dims(1, 1, work_group_size);
         /*
         DPCT1049:19: The work-group size passed to the SYCL kernel may exceed
@@ -322,7 +322,7 @@ void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
 
-    norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
+    norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream, ctx.device);
 
     (void)src1;
     (void)dst;
@@ -340,7 +340,7 @@ void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, const ggml_tensor*
 
     int num_groups = dst->op_params[0];
     int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
-    group_norm_f32_sycl(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream);
+    group_norm_f32_sycl(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream, ctx.device);
 
     (void)src1;
     (void)dst;
@@ -362,7 +362,7 @@ void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* sr
     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
 
-    rms_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
+    rms_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream, ctx.device);
 
     (void)src1;
     (void)dst;
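
Note (not part of the patch): every hunk above makes the same substitution. The
old helper get_work_group_size() constructed a dpct::device_info and queried the
device on every kernel launch; the new call sites read a value cached per device
via ggml_sycl_info().work_group_size(device_id), with the device id taken from
ggml_backend_sycl_context (ctx.device). The definition of the cached accessor is
not shown in these hunks, so the sketch below is only one plausible shape: the
member name max_work_group_sizes, the GGML_SYCL_MAX_DEVICES value, and the init
body are illustrative assumptions, not the actual implementation.

    // Hypothetical sketch of the cached-lookup pattern the call sites assume.
    // The real ggml_sycl_device_info layout is defined elsewhere in the PR.
    #include <sycl/sycl.hpp>
    #include <vector>

    #define GGML_SYCL_MAX_DEVICES 48   // assumed limit for this sketch

    struct ggml_sycl_device_info {
        int device_count = 0;
        // Cached per-device limit, filled once at init so hot launch paths
        // avoid a device-info query on every kernel submission.
        int max_work_group_sizes[GGML_SYCL_MAX_DEVICES] = {0};

        int work_group_size(int device_id) const {
            return max_work_group_sizes[device_id];
        }
    };

    static ggml_sycl_device_info ggml_sycl_init() {
        ggml_sycl_device_info info;
        std::vector<sycl::device> devices =
            sycl::device::get_devices(sycl::info::device_type::gpu);
        info.device_count = (int) devices.size();
        for (int i = 0; i < info.device_count && i < GGML_SYCL_MAX_DEVICES; ++i) {
            // Query the limit once here, instead of per call in the
            // soft_max/norm/group_norm/rms_norm launch helpers.
            info.max_work_group_sizes[i] = (int)
                devices[i].get_info<sycl::info::device::max_work_group_size>();
        }
        return info;
    }

    ggml_sycl_device_info & ggml_sycl_info() {
        static ggml_sycl_device_info info = ggml_sycl_init();
        return info;
    }

With a shape like this, a call site such as
int max_block_size = ggml_sycl_info().work_group_size(device_id);
is a cached array read rather than a fresh device query per launch.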