diff --git a/scripts/ci-run.sh b/scripts/ci-run.sh
index 2ea6e23be..06b5d9c6e 100755
--- a/scripts/ci-run.sh
+++ b/scripts/ci-run.sh
@@ -10,10 +10,11 @@ usage: ci-run.sh <tmp_dir> [<cache_dir>]
 
 This script wraps ci/run.sh:
-* If <tmp_dir> is a ramdisk, you can reduce writes to your SSD.
-  (~30GB per run with openllama_3b_v2)
+* If <tmp_dir> is a ramdisk, you can reduce writes to your SSD. If <tmp_dir>
+  is not a ramdisk, keep in mind that total writes will increase by the size
+  of <tmp_dir>. (openllama_3b_v2: quantized models are about 30GB)
 * Persistent model and data files are synced to and from <cache_dir>,
   excluding generated .gguf files.
+  (openllama_3b_v2: persistent files are about 6.6GB)
 * <cache_dir> defaults to ~/.cache/llama.cpp
 EOF
 exit 1