Merge branch 'concedo' into concedo_experimental

Concedo 2023-11-06 20:16:07 +08:00
commit 372cfef2c3
3 changed files with 9 additions and 2 deletions

Makefile

@@ -118,7 +118,7 @@ ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
 FULLCFLAGS += -mavx2 -msse3 -mfma -mf16c -mavx
 else
 # if not on windows, they are clearly building it themselves, so lets just use whatever is supported
-ifdef LLAMA_COLAB
+ifdef LLAMA_PORTABLE
 CFLAGS += -mavx2 -msse3 -mfma -mf16c -mavx
 else
 CFLAGS += -march=native -mtune=native
@@ -152,11 +152,17 @@ ifdef LLAMA_CUBLAS
 CUBLAS_OBJS = ggml-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
 NVCC = nvcc
 NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math
 ifdef CUDA_DOCKER_ARCH
 NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
+else
+ifdef LLAMA_PORTABLE
+NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=all-major
 else
 NVCCFLAGS += -arch=native
+endif
 endif # CUDA_DOCKER_ARCH
 ifdef LLAMA_CUDA_FORCE_DMMV
 NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
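For readers skimming the hunk: the new nesting picks the nvcc arch flag in priority order: CUDA_DOCKER_ARCH if set, otherwise -arch=all-major when LLAMA_PORTABLE is enabled, otherwise -arch=native. A minimal sketch of the three outcomes (the koboldcpp_cublas target name is taken from the Colab cell later in this commit; sm_86 is only an illustrative arch value, not from this diff):

    make koboldcpp_cublas LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=sm_86   # nvcc ... -arch=sm_86
    make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_PORTABLE=1         # nvcc ... -arch=all-major
    make koboldcpp_cublas LLAMA_CUBLAS=1                          # nvcc ... -arch=native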

README.md

@@ -88,6 +88,7 @@ You can then run koboldcpp anywhere from the terminal by running `koboldcpp` to
 - KoboldCpp has a few unofficial third-party community created docker images. Feel free to try them out, but do not expect up-to-date support:
 - https://github.com/korewaChino/koboldCppDocker
 - https://github.com/noneabove1182/koboldcpp-docker
+- If you're building your own docker, remember to set CUDA_DOCKER_ARCH or enable LLAMA_PORTABLE
 
 ## Questions and Help
 - **First, please check out [The KoboldCpp FAQ and Knowledgebase](https://github.com/LostRuins/koboldcpp/wiki) which may already have answers to your questions! Also please search through past issues and discussions.**
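The new README bullet matters because the default build path uses -march=native and -arch=native, so a binary compiled inside docker on one machine may not run on another host's CPU or GPU. A hedged sketch of a custom image's build step (the RUN lines and Dockerfile context are hypothetical; only the make flags come from this commit):

    # Hypothetical Dockerfile build step for a custom image:
    RUN make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_PORTABLE=1
    # or pin explicit GPU architectures instead:
    RUN make koboldcpp_cublas LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=all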

colab.ipynb

@@ -67,7 +67,7 @@
 "!echo Finding prebuilt binary for {kvers}\r\n",
 "!wget -O dlfile.tmp https://kcppcolab.concedo.workers.dev/?{kvers} && mv dlfile.tmp koboldcpp_cublas.so\r\n",
 "!test -f koboldcpp_cublas.so && echo Prebuilt Binary Exists || echo Prebuilt Binary Does Not Exist\r\n",
-"!test -f koboldcpp_cublas.so && echo Build Skipped || make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_COLAB=1\r\n",
+"!test -f koboldcpp_cublas.so && echo Build Skipped || make koboldcpp_cublas LLAMA_CUBLAS=1 LLAMA_PORTABLE=1\r\n",
 "!cp koboldcpp_cublas.so koboldcpp_cublas.dat\r\n",
 "!apt install aria2 -y\r\n",
 "!aria2c -x 10 -o model.ggml --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Model\r\n",