diff --git a/.github/workflows/server-test.yml b/.github/workflows/server-test.yml
index c39e5dd1d..9e5a5cd8d 100644
--- a/.github/workflows/server-test.yml
+++ b/.github/workflows/server-test.yml
@@ -40,12 +40,12 @@ jobs:
 
       - name: Download test model
         id: download_model
         run: |
-          ./scripts/hf.sh --repo TheBloke/Tinyllama-2-1b-miniguanaco-GGUF --file tinyllama-2-1b-miniguanaco.Q2_K.gguf
+          ./scripts/hf.sh --repo ngxson/dummy-llama --file llama_xs_q4.bin
 
       - name: Server Integration Tests
         id: server_integration_test
         run: |
           cd examples/server/tests
-          ./tests.sh ../../../tinyllama-2-1b-miniguanaco.Q2_K.gguf
+          ./tests.sh ../../../llama_xs_q4.bin
 
diff --git a/examples/server/tests/README.md b/examples/server/tests/README.md
index 975fee848..ae8ae74f3 100644
--- a/examples/server/tests/README.md
+++ b/examples/server/tests/README.md
@@ -7,5 +7,5 @@ Functional server tests suite.
 
 ### Run tests
 1. Build the server
-2. download a GGUF model: `../../../scripts/hf.sh --repo TheBloke/Tinyllama-2-1b-miniguanaco-GGUF --file tinyllama-2-1b-miniguanaco.Q2_K.gguf`
-3. Start the test: `./tests.sh tinyllama-2-1b-miniguanaco.Q2_K.gguf -ngl 23 --log-disable`
+2. download a GGUF model: `../../../scripts/hf.sh --repo ngxson/dummy-llama --file llama_xs_q4.bin`
+3. Start the test: `./tests.sh ../../../llama_xs_q4.bin -ngl 23 --log-disable`