diff --git a/.devops/llama-cli-cann.Dockerfile b/.devops/llama-cli-cann.Dockerfile
index db5ba2f25..45c0585b0 100644
--- a/.devops/llama-cli-cann.Dockerfile
+++ b/.devops/llama-cli-cann.Dockerfile
@@ -1,6 +1,6 @@
 ARG ASCEND_VERSION=8.0.rc2.alpha003-910b-openeuler22.03-py3.8
 
-FROM cosdt/cann:$ASCEND_VERSION AS build
+FROM ascendai/cann:$ASCEND_VERSION AS build
 
 WORKDIR /app
 
@@ -26,7 +26,7 @@ RUN echo "Building with static libs" && \
     cmake --build build --config Release --target llama-cli
 
 # TODO: use image with NNRT
-FROM cosdt/cann:$ASCEND_VERSION AS runtime
+FROM ascendai/cann:$ASCEND_VERSION AS runtime
 COPY --from=build /app/build/bin/llama-cli /llama-cli
 
 ENV LC_ALL=C.utf8
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index d6a7b66a5..c770bbd15 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -930,7 +930,7 @@ jobs:
         shell: bash
 
     env:
-      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7dff44ba-e3af-4448-841c-0d616c8da6e7/w_BaseKit_p_2024.1.0.595_offline.exe
+      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/b380d914-366b-4b77-a74a-05e3c38b3514/intel-oneapi-base-toolkit-2025.0.0.882_offline.exe
       WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel
       ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"
     steps:
diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md
index bc8c0f886..38185f738 100644
--- a/docs/backend/SYCL.md
+++ b/docs/backend/SYCL.md
@@ -41,6 +41,8 @@ The following release is verified with good quality:
 
 ## News
 
+- 2024.11
+  - Use syclcompat to improve the performance on some platforms. This requires to use oneAPI 2025.0 or newer.
 - 2024.8
   - Use oneDNN as the default GEMM library, improve the compatibility for new Intel GPUs.
 
diff --git a/docs/build.md b/docs/build.md
index 95512415a..52de2b4e2 100644
--- a/docs/build.md
+++ b/docs/build.md
@@ -375,7 +375,7 @@ cmake --build build --config release
 
 You can test with:
 
-`./build/llama-cli -m PATH_TO_MODEL -p "Building a website can be done in 10 steps:" -ngl 32`
+`./build/bin/llama-cli -m PATH_TO_MODEL -p "Building a website can be done in 10 steps:" -ngl 32`
 
 If the fllowing info is output on screen, you are using `llama.cpp by CANN backend`:
 ```bash
diff --git a/examples/server/public/index.html b/examples/server/public/index.html
index 55639a944..65a915d59 100644
--- a/examples/server/public/index.html
+++ b/examples/server/public/index.html
@@ -12,7 +12,7 @@
     .markdown {
       h1, h2, h3, h4, h5, h6, ul, ol, li { all: revert; }
       pre {
-        @apply whitespace-pre-wrap my-4 rounded-lg p-2;
+        @apply whitespace-pre-wrap rounded-lg p-2;
         border: 1px solid currentColor;
       }
       /* TODO: fix markdown table */
@@ -25,8 +25,11 @@
     .bg-base-200 {background-color: var(--fallback-b2,oklch(var(--b2)/1))}
     .bg-base-300 {background-color: var(--fallback-b3,oklch(var(--b3)/1))}
     .text-base-content {color: var(--fallback-bc,oklch(var(--bc)/1))}
+    .show-on-hover {
+      @apply opacity-0 group-hover:opacity-100;
+    }
     .btn-mini {
-      @apply cursor-pointer opacity-0 group-hover:opacity-100 hover:shadow-md;
+      @apply cursor-pointer hover:shadow-md;
     }
     .chat-screen { max-width: 900px; }
     /* because the default bubble color is quite dark, we will make a custom one using bg-base-300 */
@@ -152,14 +155,14 @@
@@ -196,12 +199,13 @@
 Settings
 Settings below are saved in browser's localStorage
@@ -209,7 +213,7 @@ Other sampler settings
@@ -218,7 +222,7 @@ Penalties settings
@@ -245,7 +249,7 @@
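As a quick sanity check of the changed pieces, a minimal sketch follows. The local image tag `llama-cli-cann` is an illustrative assumption, the `docker build` invocation is standard Docker usage rather than anything prescribed by this diff, and the final command is the test line from docs/build.md:

```bash
# Rebuild the CANN image from the updated Dockerfile (now based on ascendai/cann).
# "llama-cli-cann" is an arbitrary local tag chosen for this example.
docker build -t llama-cli-cann -f .devops/llama-cli-cann.Dockerfile .

# Quick test from docs/build.md, using the corrected binary path under build/bin/:
./build/bin/llama-cli -m PATH_TO_MODEL -p "Building a website can be done in 10 steps:" -ngl 32
```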