From 8241151f1645a7dd6087c2c5d409bfb69de7d28c Mon Sep 17 00:00:00 2001
From: arthw <14088817+arthw@users.noreply.github.com>
Date: Sat, 14 Sep 2024 09:01:05 +0800
Subject: [PATCH 1/2] set context default to avoid memory issue, update guide
---
docs/backend/SYCL.md | 8 ++++++++
examples/sycl/run-llama2.sh | 7 ++++---
2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md
index e3b9572cc..ff896af53 100644
--- a/docs/backend/SYCL.md
+++ b/docs/backend/SYCL.md
@@ -636,6 +636,14 @@ use 1 SYCL GPUs: [0] with Max compute units:512
It's same for other projects including llama.cpp SYCL backend.
+- Meet issue: `Native API failed. Native API returns: -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -6 (PI_ERROR_OUT_OF_HOST_MEMORY)` or `failed to allocate SYCL0 buffer`
+
+ Device memory is not enough.
+
+ |Reason|Solution|
+ |-|-|
+ |The default context size is too big, which leads to more memory usage.|Set `-c 8192` or a smaller value.|
+ |Model is big and requires more memory than the device has.|Choose a smaller quantized model, like Q5 -> Q4; use more than one device to load the model.|
### **GitHub contribution**:
Please add the **[SYCL]** prefix/tag in issues/PRs titles to help the SYCL-team check/address them without delay.
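For reference, a minimal sketch of the workaround described in the table above: pass a smaller context size to `llama-cli` so the KV cache fits in device memory. The model path and prompt below are illustrative placeholders, not part of this patch.

```sh
# Sketch only: shrink the context (-c) to reduce device memory usage.
# Model path and prompt are placeholders; adjust them for your setup.
./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf \
    -p "Building a website can be done in 10 simple steps:\nStep 1:" \
    -n 400 -e -ngl 33 -c 4096
```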
diff --git a/examples/sycl/run-llama2.sh b/examples/sycl/run-llama2.sh
index a8cf0aa64..3b9ba3b2d 100755
--- a/examples/sycl/run-llama2.sh
+++ b/examples/sycl/run-llama2.sh
@@ -11,16 +11,17 @@ source /opt/intel/oneapi/setvars.sh
#ZES_ENABLE_SYSMAN=1, Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory. Recommended to use when --split-mode = layer.
INPUT_PROMPT="Building a website can be done in 10 simple steps:\nStep 1:"
-MODEL_FILE=llama-2-7b.Q4_0.gguf
+MODEL_FILE=models/llama-2-7b.Q4_0.gguf
NGL=33
+CONTEXT=8192
if [ $# -gt 0 ]; then
GGML_SYCL_DEVICE=$1
echo "use $GGML_SYCL_DEVICE as main GPU"
#use signle GPU only
- ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -mg $GGML_SYCL_DEVICE -sm none
+ ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT} -mg $GGML_SYCL_DEVICE -sm none
else
#use multiple GPUs with same max compute units
- ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0
+ ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT}
fi
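As a usage sketch for the updated script (run from the llama.cpp root so that `./build/bin/llama-cli` and `models/` resolve; the device id below is only an example):

```sh
# Multi-GPU run with the script's default 8192-token context.
./examples/sycl/run-llama2.sh

# Single-GPU run, pinning the main GPU to device id 0 (example id).
./examples/sycl/run-llama2.sh 0
```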
From a6a8f8d09c8e27a70926d8a52016ce8f49d954dd Mon Sep 17 00:00:00 2001
From: Neo Zhang Jianyu
Date: Tue, 17 Sep 2024 16:25:43 +0800
Subject: [PATCH 2/2] Update docs/backend/SYCL.md
Co-authored-by: Meng, Hengyu
---
docs/backend/SYCL.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md
index ff896af53..bc266f7d8 100644
--- a/docs/backend/SYCL.md
+++ b/docs/backend/SYCL.md
@@ -636,7 +636,7 @@ use 1 SYCL GPUs: [0] with Max compute units:512
It's same for other projects including llama.cpp SYCL backend.
-- Meet issue: `Native API failed. Native API returns: -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -6 (PI_ERROR_OUT_OF_HOST_MEMORY)` or `failed to allocate SYCL0 buffer`
+- Meet issue: `Native API failed. Native API returns: -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -999 (UNKNOWN PI error)` or `failed to allocate SYCL0 buffer`
Device memory is not enough.