rocm: Automatically build externally
A separate CMake project allows setting a separate compiler specifically for ROCm. It also simplifies the build configuration, since the ROCm compiler path no longer has to be set for the whole build. Most importantly, it lets me build llama.cpp with GCC; otherwise I get errors when trying to embed it in a Qt application.
parent 7ddf185537
commit e2d3353010
6 changed files with 118 additions and 28 deletions
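In short, the ROCm/HIP sources are wrapped in their own CMake project, driven from the main build via ExternalProject, and linked back in as an imported shared library. Below is a minimal sketch of that general pattern with purely illustrative target names and paths; the actual code added by this commit follows in the diff.

include(ExternalProject)

# The sub-project gets its own configure/build step, so it can use a
# different compiler (here ROCm's clang++) than the surrounding build
# (e.g. GCC).
ExternalProject_Add(rocm-backend-project                    # illustrative name
    SOURCE_DIR       ${CMAKE_SOURCE_DIR}/rocm-backend       # illustrative path
    CMAKE_ARGS       -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/clang++
                     -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    INSTALL_COMMAND  ""                                     # keep the artifact in the build tree
    BUILD_BYPRODUCTS ${CMAKE_BINARY_DIR}/bin/librocm-backend.so   # where the sub-project is expected to place its output
)

# Make the produced shared library usable by the rest of the build as an
# IMPORTED target that depends on the external build step.
add_library(rocm-backend SHARED IMPORTED)
add_dependencies(rocm-backend rocm-backend-project)
set_target_properties(rocm-backend PROPERTIES
    IMPORTED_LOCATION ${CMAKE_BINARY_DIR}/bin/librocm-backend.so)

# Consumers then link the imported target like any other library:
# target_link_libraries(my_app PRIVATE rocm-backend)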
@@ -372,6 +372,8 @@ if (LLAMA_CLBLAST)
endif()

if (LLAMA_HIPBLAS)
    if (WIN32)
        # todo: also allow building with a separate compiler on windows
        list(APPEND CMAKE_PREFIX_PATH /opt/rocm)

        if (NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang")
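(Note on the structure: on Windows the existing in-tree path is kept for now, so there the whole build must still use ROCm's clang as before, per the todo above; on other platforms the hipBLAS backend is instead handed off to the external project set up further down.)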
@@ -389,15 +391,13 @@ if (LLAMA_HIPBLAS)
        message(STATUS "HIP and hipBLAS found")
        add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUBLAS)
        add_library(ggml-rocm OBJECT ggml-cuda.cu ggml-cuda.h)
        if (BUILD_SHARED_LIBS)
            set_target_properties(ggml-rocm PROPERTIES POSITION_INDEPENDENT_CODE ON)
        endif()
        if (LLAMA_CUDA_FORCE_DMMV)
            target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_FORCE_DMMV)
        endif()
        target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
        target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
        target_compile_definitions(ggml-rocm PRIVATE K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
        target_compile_definitions(ggml-rocm PRIVATE CC_TURING=1000000000)
        set_source_files_properties(ggml-cuda.cu PROPERTIES LANGUAGE CXX)
        target_link_libraries(ggml-rocm PRIVATE hip::device PUBLIC hip::host roc::rocblas roc::hipblas)
@@ -408,6 +408,28 @@ if (LLAMA_HIPBLAS)
    else()
        message(WARNING "hipBLAS or HIP not found. Try setting CMAKE_PREFIX_PATH=/opt/rocm")
    endif()
else()
    if (LLAMA_STATIC)
        message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
    endif()
    include(ExternalProject)
    if (NOT DEFINED LLAMA_ROCM_PROJECT_CMAKE_ARGS)
        set(LLAMA_ROCM_PROJECT_CMAKE_ARGS -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/clang++ -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE})
    endif()
    ExternalProject_Add(ggml-rocm-project
        SOURCE_DIR ../ggml-rocm
        PREFIX ggml-rocm
        INSTALL_COMMAND ""
        CMAKE_ARGS ${LLAMA_ROCM_PROJECT_CMAKE_ARGS}
        BUILD_BYPRODUCTS ${CMAKE_BINARY_DIR}/bin/libggml-rocm.so
    )
    add_compile_definitions(GGML_USE_CUBLAS)
    add_library(ggml-rocm SHARED IMPORTED)
    add_dependencies(ggml-rocm ggml-rocm-project)
    set_target_properties(ggml-rocm PROPERTIES IMPORTED_LOCATION ${CMAKE_BINARY_DIR}/bin/libggml-rocm.so)
    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ggml-rocm)
    set(GGML_SOURCES_EXTRA ${GGML_SOURCES_EXTRA} ggml-cuda.h)
endif()
endif()

if (LLAMA_ALL_WARNINGS)
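Since the sub-project's arguments are only defaulted when LLAMA_ROCM_PROJECT_CMAKE_ARGS is not already defined, an embedding project can pre-set them (or pass them as a cache variable with -D at configure time). Below is a hypothetical sketch of a parent CMakeLists.txt, say for a Qt application that vendors llama.cpp, building the host code with its default compiler while pinning the ROCm sub-build to ROCm's clang++; all names and paths are illustrative.

cmake_minimum_required(VERSION 3.12)
project(my-qt-app CXX)                     # placeholder project name

# Enable the hipBLAS backend and choose the sub-project's compiler before
# llama.cpp's CMakeLists.txt is processed. The rest of this build keeps
# using whatever compiler CMake selected (e.g. GCC).
set(LLAMA_HIPBLAS ON CACHE BOOL "" FORCE)
set(LLAMA_ROCM_PROJECT_CMAKE_ARGS
    -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/clang++   # adjust to the local ROCm install
    -DCMAKE_BUILD_TYPE=Release
)

add_subdirectory(llama.cpp)                # assumes llama.cpp is vendored at this path

add_executable(my-qt-app-bin main.cpp)     # placeholder application target
target_link_libraries(my-qt-app-bin PRIVATE llama)

The same variable can also be supplied on the configure command line when building llama.cpp directly, since a -D definition makes it a cache variable and the NOT DEFINED check then skips the default.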
@@ -762,6 +784,14 @@ if (LLAMA_METAL)
        WORLD_READ
        DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()
install(
    FILES ${CMAKE_BINARY_DIR}/../ggml-rocm/src/ggml-rocm-project-build/libggml-rocm.so
    PERMISSIONS
        OWNER_READ
        OWNER_WRITE
        GROUP_READ
        WORLD_READ
    DESTINATION ${CMAKE_INSTALL_BINDIR})

#
# programs, examples and tests
ggml-rocm/CMakeLists.txt (new file, 57 lines)
@@ -0,0 +1,57 @@
cmake_minimum_required(VERSION 3.12) # Don't bump this version for no reason
project(ggml-rocm CXX)

set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED true)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

list(APPEND CMAKE_PREFIX_PATH /opt/rocm)

if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
    message(FATAL_ERROR "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++")
endif()

find_package(hip)
find_package(hipblas)
find_package(rocblas)

if (${hipblas_FOUND} AND ${hip_FOUND})
    message(STATUS "HIP and hipBLAS found")
    add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUBLAS)
    add_library(ggml-rocm SHARED ggml-cuda.cu ggml-cuda.h ggml.h)
    set_target_properties(ggml-rocm PROPERTIES POSITION_INDEPENDENT_CODE ON)
    if (LLAMA_CUDA_FORCE_DMMV)
        target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_FORCE_DMMV)
    endif()
    target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
    target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
    target_compile_definitions(ggml-rocm PRIVATE K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
    target_compile_definitions(ggml-rocm PRIVATE CC_TURING=1000000000)
    set_source_files_properties(ggml-cuda.cu PROPERTIES LANGUAGE CXX)
    set_target_properties(ggml-rocm PROPERTIES LINKER_LANGUAGE CXX LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/../../../bin)
    target_link_libraries(ggml-rocm PUBLIC hip::device hip::host roc::rocblas roc::hipblas)
else()
    message(FATAL_ERROR "hipBLAS or HIP not found. Try setting CMAKE_PREFIX_PATH=/opt/rocm")
endif()

if (NOT MSVC)
    set(cxx_flags
        -Wall
        -Wextra
        -Wpedantic
        -Wcast-qual
        -Wno-unused-function
        -Wno-multichar
    )
else()
    # todo : msvc
endif()

add_compile_options(
    "$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>"
)
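Two details of this sub-project are worth noting: it mirrors the ROCm-related settings of the in-tree path (GGML_USE_HIPBLAS/GGML_USE_CUBLAS, the GGML_CUDA_* knobs, the CC_TURING override), and its LIBRARY_OUTPUT_DIRECTORY points three levels up from the external project's build directory, which should resolve to the parent build tree's bin/ directory, i.e. the same libggml-rocm.so location that the IMPORTED target and BUILD_BYPRODUCTS entry above refer to.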
ggml-rocm/ggml-cuda.cu (new symbolic link)
@@ -0,0 +1 @@
../ggml-cuda.cu

ggml-rocm/ggml-cuda.h (new symbolic link)
@@ -0,0 +1 @@
../ggml-cuda.h

ggml-rocm/ggml.h (new symbolic link)
@@ -0,0 +1 @@
../ggml.h
@@ -40,4 +40,4 @@ llama_build_and_test_executable(test-grad0.cpp) # SLOW
# dummy executable - not installed
get_filename_component(TEST_TARGET test-c.c NAME_WE)
add_executable(${TEST_TARGET} test-c.c)
-target_link_libraries(${TEST_TARGET} PRIVATE llama)
+target_link_libraries(${TEST_TARGET} PRIVATE llama ggml)