Fix build shared ggml when CUDA is enabled

This commit is contained in:
Howard Su 2023-06-18 23:43:24 +08:00
parent 0ede372a51
commit 0c6392deff

View file

@@ -465,6 +465,7 @@ add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
 if (BUILD_SHARED_LIBS)
     set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
     add_library(ggml_shared SHARED $<TARGET_OBJECTS:ggml>)
+    target_link_libraries(ggml_shared PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
 endif()
 add_library(llama
@@ -496,6 +497,10 @@ if (GGML_SOURCES_CUDA)
     set_property(TARGET ggml_static PROPERTY CUDA_ARCHITECTURES OFF)
     set_property(TARGET ggml_static PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
+    if (BUILD_SHARED_LIBS)
+        set_property(TARGET ggml_shared PROPERTY CUDA_ARCHITECTURES OFF)
+        set_property(TARGET ggml_shared PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
+    endif()
     set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF)
 endif()