testing LLAMA_PORTABLE flag for building

Concedo 2023-11-06 20:15:15 +08:00
parent 93c4b2a9c6
commit 2102942121
3 changed files with 9 additions and 2 deletions

Makefile

@@ -118,7 +118,7 @@ ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
 FULLCFLAGS += -mavx2 -msse3 -mfma -mf16c -mavx
 else
 # if not on windows, they are clearly building it themselves, so lets just use whatever is supported
-ifdef LLAMA_COLAB
+ifdef LLAMA_PORTABLE
 CFLAGS += -mavx2 -msse3 -mfma -mf16c -mavx
 else
 CFLAGS += -march=native -mtune=native
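
For illustration only (not part of this commit): a minimal sketch of how this branch could be invoked on a non-Windows x86_64 host, assuming the default make target.

# Portable CPU build: fixed -mavx2 -msse3 -mfma -mf16c -mavx instead of -march=native
make LLAMA_PORTABLE=1
# Host-tuned build when the flag is unset: -march=native -mtune=native
make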
@@ -152,11 +152,17 @@ ifdef LLAMA_CUBLAS
 CUBLAS_OBJS = ggml-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
 NVCC = nvcc
 NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math
 ifdef CUDA_DOCKER_ARCH
 NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
+else
+ifdef LLAMA_PORTABLE
+NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=all-major
+else
+NVCCFLAGS += -arch=native
+endif
 endif # CUDA_DOCKER_ARCH
 ifdef LLAMA_CUDA_FORCE_DMMV
 NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
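
For illustration only: the nvcc architecture selection produced by the branch above, plus the build command the Colab notebook uses further below.

# CUDA_DOCKER_ARCH set   -> -arch=$(CUDA_DOCKER_ARCH)
# LLAMA_PORTABLE=1       -> -arch=all-major (covers all major GPU compute capabilities)
# neither flag           -> -arch=native (targets only the build host's GPU)
make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_PORTABLE=1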

README.md

@@ -88,6 +88,7 @@ You can then run koboldcpp anywhere from the terminal by running `koboldcpp` to
 - KoboldCpp has a few unofficial third-party community created docker images. Feel free to try them out, but do not expect up-to-date support:
 - https://github.com/korewaChino/koboldCppDocker
 - https://github.com/noneabove1182/koboldcpp-docker
+- If you're building your own docker, remember to set CUDA_DOCKER_ARCH or enable LLAMA_PORTABLE
 ## Questions and Help
 - **First, please check out [The KoboldCpp FAQ and Knowledgebase](https://github.com/LostRuins/koboldcpp/wiki) which may already have answers to your questions! Also please search through past issues and discussions.**
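
A hypothetical sketch of the README note added above; the Dockerfile RUN step and the sm_86 value are assumptions, not part of this commit.

# Option 1: portable binary covering all major GPU architectures
RUN make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_PORTABLE=1
# Option 2: pin a specific architecture instead
# RUN make koboldcpp_cublas LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=sm_86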

colab.ipynb

@@ -67,7 +67,7 @@
 "!echo Finding prebuilt binary for {kvers}\r\n",
 "!wget -O dlfile.tmp https://kcppcolab.concedo.workers.dev/?{kvers} && mv dlfile.tmp koboldcpp_cublas.so\r\n",
 "!test -f koboldcpp_cublas.so && echo Prebuilt Binary Exists || echo Prebuilt Binary Does Not Exist\r\n",
-"!test -f koboldcpp_cublas.so && echo Build Skipped || make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_COLAB=1\r\n",
+"!test -f koboldcpp_cublas.so && echo Build Skipped || make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_PORTABLE=1\r\n",
 "!cp koboldcpp_cublas.so koboldcpp_cublas.dat\r\n",
 "!apt install aria2 -y\r\n",
 "!aria2c -x 10 -o model.ggml --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Model\r\n",