ggml : automatic selection of best CPU backend (#10606)
* ggml : automatic selection of best CPU backend
* amx : minor opt
* add GGML_AVX_VNNI to enable avx-vnni, fix checks
commit 3420909dff (parent 86dc11c5bc)
12 changed files with 599 additions and 156 deletions
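With GGML_BACKEND_DL enabled, ggml loads the CPU backend as a shared library at runtime and picks the most capable variant the host supports. As a rough illustration only (the real selection happens natively inside ggml's loader, not in shell, and the libggml-cpu-<name>.so naming is an assumption), the sketch below approximates the decision by inspecting /proc/cpuinfo; the variant names mirror those built in the diff that follows:

#!/bin/sh
# Illustrative only: approximate the loader's choice by checking CPU flags.
# The actual selection is done natively by ggml when GGML_BACKEND_DL is on.
flags=$(grep -m1 '^flags' /proc/cpuinfo)
best=avx
case "$flags" in *avx2*)     best=avx2   ;; esac
case "$flags" in *avx512f*)  best=avx512 ;; esac
case "$flags" in *amx_int8*) best=amx    ;; esac
echo "most capable CPU backend variant for this host: $best"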
@@ -3,22 +3,34 @@ ARG UBUNTU_VERSION=22.04
 FROM ubuntu:$UBUNTU_VERSION AS build
 
 RUN apt-get update && \
-    apt-get install -y build-essential git libcurl4-openssl-dev
+    apt-get install -y build-essential git cmake libcurl4-openssl-dev
 
 WORKDIR /app
 
 COPY . .
 
-ENV LLAMA_CURL=1
-
-RUN make -j$(nproc) llama-server
+RUN \
+    # Build multiple versions of the CPU backend
+    scripts/build-cpu.sh avx         -DGGML_AVX=ON -DGGML_AVX2=OFF && \
+    scripts/build-cpu.sh avx2        -DGGML_AVX=ON -DGGML_AVX2=ON && \
+    scripts/build-cpu.sh avx512      -DGGML_AVX=ON -DGGML_AVX2=ON -DGGML_AVX512=ON && \
+    scripts/build-cpu.sh amx         -DGGML_AVX=ON -DGGML_AVX2=ON -DGGML_AVX512=ON -DGGML_AVX_VNNI=ON -DGGML_AVX512_VNNI=ON -DGGML_AMX_TILE=ON -DGGML_AMX_INT8=ON && \
+    # Build llama-server
+    cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \
+    cmake --build build --target llama-server -j $(nproc) && \
+    # Copy the built libraries to /app/lib
+    mkdir -p /app/lib && \
+    mv libggml-cpu* /app/lib/ && \
+    find build -name "*.so" -exec cp {} /app/lib/ \;
 
 FROM ubuntu:$UBUNTU_VERSION AS runtime
 
 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev libgomp1 curl
 
-COPY --from=build /app/llama-server /llama-server
+COPY --from=build /app/build/bin/llama-server /llama-server
+COPY --from=build /app/lib/ /
 
 ENV LC_ALL=C.utf8
 # Must be set to 0.0.0.0 so it can listen to requests from host machine
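Each scripts/build-cpu.sh <name> <flags...> invocation above appears to produce one libggml-cpu-<name>.so in the source root, which the later mv libggml-cpu* step collects into /app/lib. A minimal sketch of what such a script could look like, assuming a ggml-cpu CMake target and a per-variant build tree; the script actually added by this commit may differ:

#!/bin/sh
set -e
# Usage: build-cpu.sh <variant-name> [extra -D cmake flags...]
name="$1"; shift
# Configure a dedicated build tree for this variant. GGML_NATIVE=OFF keeps
# the build portable; the variant's ISA features come from the extra flags.
cmake -S . -B "build-cpu-$name" -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF \
    -DCMAKE_BUILD_TYPE=Release "$@"
cmake --build "build-cpu-$name" --target ggml-cpu -j "$(nproc)"
# Leave the variant library in the source root under a distinguishing name
# (the exact build-tree path is an assumption, hence the find).
cp "$(find "build-cpu-$name" -name 'libggml-cpu.so' | head -n1)" \
    "./libggml-cpu-$name.so"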