Updated CMakeLists.txt and added a JNI implementation to support building this library as a dependency in Android Studio with the NDK.

This commit is contained in:
George 2023-06-05 17:25:51 +08:00
parent d1f563a743
commit c3b4efc89e
2 changed files with 162 additions and 121 deletions

View file

@@ -3,7 +3,7 @@ project("llama.cpp" C CXX)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) if(NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE) set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
endif() endif()
@@ -13,25 +13,24 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
set(LLAMA_STANDALONE ON) set(LLAMA_STANDALONE ON)
# configure project version # configure project version
# TODO # TODO
else() else()
set(LLAMA_STANDALONE OFF) set(LLAMA_STANDALONE OFF)
endif() endif()
if (EMSCRIPTEN) if(EMSCRIPTEN)
set(BUILD_SHARED_LIBS_DEFAULT OFF) set(BUILD_SHARED_LIBS_DEFAULT OFF)
option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON) option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON)
else() else()
if (MINGW) if(MINGW)
set(BUILD_SHARED_LIBS_DEFAULT OFF) set(BUILD_SHARED_LIBS_DEFAULT OFF)
else() else()
set(BUILD_SHARED_LIBS_DEFAULT ON) set(BUILD_SHARED_LIBS_DEFAULT ON)
endif() endif()
endif() endif()
# #
# Option list # Option list
# #
@@ -58,8 +57,9 @@ option(LLAMA_AVX512 "llama: enable AVX512"
option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF) option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF) option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
option(LLAMA_FMA "llama: enable FMA" ON) option(LLAMA_FMA "llama: enable FMA" ON)
# in MSVC F16C is implied with AVX2/AVX512 # in MSVC F16C is implied with AVX2/AVX512
if (NOT MSVC) if(NOT MSVC)
option(LLAMA_F16C "llama: enable F16C" ON) option(LLAMA_F16C "llama: enable F16C" ON)
endif() endif()
@@ -113,7 +113,6 @@ endif()
# #
# Compile flags # Compile flags
# #
set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED true) set(CMAKE_CXX_STANDARD_REQUIRED true)
set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD 11)
@@ -121,26 +120,27 @@ set(CMAKE_C_STANDARD_REQUIRED true)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED) find_package(Threads REQUIRED)
if (NOT MSVC) if(NOT MSVC)
if (LLAMA_SANITIZE_THREAD) if(LLAMA_SANITIZE_THREAD)
add_compile_options(-fsanitize=thread) add_compile_options(-fsanitize=thread)
link_libraries(-fsanitize=thread) link_libraries(-fsanitize=thread)
endif() endif()
if (LLAMA_SANITIZE_ADDRESS) if(LLAMA_SANITIZE_ADDRESS)
add_compile_options(-fsanitize=address -fno-omit-frame-pointer) add_compile_options(-fsanitize=address -fno-omit-frame-pointer)
link_libraries(-fsanitize=address) link_libraries(-fsanitize=address)
endif() endif()
if (LLAMA_SANITIZE_UNDEFINED) if(LLAMA_SANITIZE_UNDEFINED)
add_compile_options(-fsanitize=undefined) add_compile_options(-fsanitize=undefined)
link_libraries(-fsanitize=undefined) link_libraries(-fsanitize=undefined)
endif() endif()
endif() endif()
if (APPLE AND LLAMA_ACCELERATE) if(APPLE AND LLAMA_ACCELERATE)
find_library(ACCELERATE_FRAMEWORK Accelerate) find_library(ACCELERATE_FRAMEWORK Accelerate)
if (ACCELERATE_FRAMEWORK)
if(ACCELERATE_FRAMEWORK)
message(STATUS "Accelerate framework found") message(STATUS "Accelerate framework found")
add_compile_definitions(GGML_USE_ACCELERATE) add_compile_definitions(GGML_USE_ACCELERATE)
@@ -150,16 +150,19 @@ if (APPLE AND LLAMA_ACCELERATE)
endif() endif()
endif() endif()
if (LLAMA_BLAS) if(LLAMA_BLAS)
if (LLAMA_STATIC) if(LLAMA_STATIC)
set(BLA_STATIC ON) set(BLA_STATIC ON)
endif() endif()
if ($(CMAKE_VERSION) VERSION_GREATER_EQUAL 3.22)
if($(CMAKE_VERSION) VERSION_GREATER_EQUAL 3.22)
set(BLA_SIZEOF_INTEGER 8) set(BLA_SIZEOF_INTEGER 8)
endif() endif()
set(BLA_VENDOR ${LLAMA_BLAS_VENDOR}) set(BLA_VENDOR ${LLAMA_BLAS_VENDOR})
find_package(BLAS) find_package(BLAS)
if (BLAS_FOUND)
if(BLAS_FOUND)
message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}") message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}")
add_compile_options(${BLAS_LINKER_FLAGS}) add_compile_options(${BLAS_LINKER_FLAGS})
@@ -175,11 +178,12 @@ if (LLAMA_BLAS)
endif() endif()
endif() endif()
if (LLAMA_CUBLAS) if(LLAMA_CUBLAS)
cmake_minimum_required(VERSION 3.17) cmake_minimum_required(VERSION 3.17)
find_package(CUDAToolkit) find_package(CUDAToolkit)
if (CUDAToolkit_FOUND)
if(CUDAToolkit_FOUND)
message(STATUS "cuBLAS found") message(STATUS "cuBLAS found")
enable_language(CUDA) enable_language(CUDA)
@@ -190,7 +194,7 @@ if (LLAMA_CUBLAS)
add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X}) add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
add_compile_definitions(GGML_CUDA_DMMV_Y=${LLAMA_CUDA_DMMV_Y}) add_compile_definitions(GGML_CUDA_DMMV_Y=${LLAMA_CUDA_DMMV_Y})
if (LLAMA_STATIC) if(LLAMA_STATIC)
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
else() else()
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
@@ -201,7 +205,7 @@ if (LLAMA_CUBLAS)
endif() endif()
endif() endif()
if (LLAMA_METAL) if(LLAMA_METAL)
find_library(FOUNDATION_LIBRARY Foundation REQUIRED) find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
find_library(METAL_FRAMEWORK Metal REQUIRED) find_library(METAL_FRAMEWORK Metal REQUIRED)
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
@@ -213,7 +217,7 @@ if (LLAMA_METAL)
add_compile_definitions(GGML_METAL_NDEBUG) add_compile_definitions(GGML_METAL_NDEBUG)
# get full path to the file # get full path to the file
#add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/") # add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/")
# copy ggml-metal.metal to bin directory # copy ggml-metal.metal to bin directory
configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY) configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY)
@@ -226,9 +230,10 @@ if (LLAMA_METAL)
) )
endif() endif()
if (LLAMA_CLBLAST) if(LLAMA_CLBLAST)
find_package(CLBlast) find_package(CLBlast)
if (CLBlast_FOUND)
if(CLBlast_FOUND)
message(STATUS "CLBlast found") message(STATUS "CLBlast found")
set(GGML_SOURCES_OPENCL ggml-opencl.cpp ggml-opencl.h) set(GGML_SOURCES_OPENCL ggml-opencl.cpp ggml-opencl.h)
@@ -241,8 +246,8 @@ if (LLAMA_CLBLAST)
endif() endif()
endif() endif()
if (LLAMA_ALL_WARNINGS) if(LLAMA_ALL_WARNINGS)
if (NOT MSVC) if(NOT MSVC)
set(c_flags set(c_flags
-Wall -Wall
-Wextra -Wextra
@@ -269,21 +274,21 @@ if (LLAMA_ALL_WARNINGS)
"$<$<COMPILE_LANGUAGE:C>:${c_flags}>" "$<$<COMPILE_LANGUAGE:C>:${c_flags}>"
"$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>" "$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>"
) )
endif() endif()
if (MSVC) if(MSVC)
add_compile_definitions(_CRT_SECURE_NO_WARNINGS) add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
if (BUILD_SHARED_LIBS) if(BUILD_SHARED_LIBS)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
endif() endif()
endif() endif()
if (LLAMA_LTO) if(LLAMA_LTO)
include(CheckIPOSupported) include(CheckIPOSupported)
check_ipo_supported(RESULT result OUTPUT output) check_ipo_supported(RESULT result OUTPUT output)
if (result)
if(result)
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
else() else()
message(WARNING "IPO is not supported: ${output}") message(WARNING "IPO is not supported: ${output}")
@@ -294,97 +299,123 @@ endif()
# TODO: probably these flags need to be tweaked on some architectures # TODO: probably these flags need to be tweaked on some architectures
# feel free to update the Makefile for your architecture and send a pull request or issue # feel free to update the Makefile for your architecture and send a pull request or issue
message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}") message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
if (NOT MSVC)
if (LLAMA_STATIC) if(NOT MSVC)
if(LLAMA_STATIC)
add_link_options(-static) add_link_options(-static)
if (MINGW)
if(MINGW)
add_link_options(-static-libgcc -static-libstdc++) add_link_options(-static-libgcc -static-libstdc++)
endif() endif()
endif() endif()
if (LLAMA_GPROF)
if(LLAMA_GPROF)
add_compile_options(-pg) add_compile_options(-pg)
endif() endif()
if (LLAMA_NATIVE)
if(LLAMA_NATIVE)
add_compile_options(-march=native) add_compile_options(-march=native)
endif() endif()
endif() endif()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
message(STATUS "ARM detected") message(STATUS "ARM detected")
if (MSVC)
if(MSVC)
# TODO: arm msvc? # TODO: arm msvc?
else() else()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
# Apple M1, M2, etc. # Apple M1, M2, etc.
# Raspberry Pi 3, 4, Zero 2 (64-bit) # Raspberry Pi 3, 4, Zero 2 (64-bit)
if(NOT DEFINED ANDROID_NDK)
add_compile_options(-mcpu=native) add_compile_options(-mcpu=native)
endif() endif()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6") endif()
if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6")
# Raspberry Pi 1, Zero # Raspberry Pi 1, Zero
if(NOT DEFINED ANDROID_NDK)
add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access) add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access)
endif() endif()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7") endif()
if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7")
# Raspberry Pi 2 # Raspberry Pi 2
if(NOT DEFINED ANDROID_NDK)
add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations) add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations)
endif() endif()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8") endif()
if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8")
# Raspberry Pi 3, 4, Zero 2 (32-bit) # Raspberry Pi 3, 4, Zero 2 (32-bit)
if(NOT DEFINED ANDROID_NDK)
add_compile_options(-mfp16-format=ieee -mno-unaligned-access) add_compile_options(-mfp16-format=ieee -mno-unaligned-access)
endif() endif()
endif() endif()
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$") endif()
elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$")
message(STATUS "x86 detected") message(STATUS "x86 detected")
if (MSVC)
if (LLAMA_AVX512) if(MSVC)
if(LLAMA_AVX512)
add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX512>) add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX512>)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX512>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX512>)
# MSVC has no compile-time flags enabling specific # MSVC has no compile-time flags enabling specific
# AVX512 extensions, neither it defines the # AVX512 extensions, neither it defines the
# macros corresponding to the extensions. # macros corresponding to the extensions.
# Do it manually. # Do it manually.
if (LLAMA_AVX512_VBMI) if(LLAMA_AVX512_VBMI)
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>) add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>) add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
endif() endif()
if (LLAMA_AVX512_VNNI)
if(LLAMA_AVX512_VNNI)
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>) add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>) add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
endif() endif()
elseif (LLAMA_AVX2) elseif(LLAMA_AVX2)
add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX2>) add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX2>)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX2>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX2>)
elseif (LLAMA_AVX) elseif(LLAMA_AVX)
add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX>) add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX>)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX>)
endif() endif()
else() else()
if (LLAMA_F16C) if(LLAMA_F16C)
add_compile_options(-mf16c) add_compile_options(-mf16c)
endif() endif()
if (LLAMA_FMA)
if(LLAMA_FMA)
add_compile_options(-mfma) add_compile_options(-mfma)
endif() endif()
if (LLAMA_AVX)
if(LLAMA_AVX)
add_compile_options(-mavx) add_compile_options(-mavx)
endif() endif()
if (LLAMA_AVX2)
if(LLAMA_AVX2)
add_compile_options(-mavx2) add_compile_options(-mavx2)
endif() endif()
if (LLAMA_AVX512)
if(LLAMA_AVX512)
add_compile_options(-mavx512f) add_compile_options(-mavx512f)
add_compile_options(-mavx512bw) add_compile_options(-mavx512bw)
endif() endif()
if (LLAMA_AVX512_VBMI)
if(LLAMA_AVX512_VBMI)
add_compile_options(-mavx512vbmi) add_compile_options(-mavx512vbmi)
endif() endif()
if (LLAMA_AVX512_VNNI)
if(LLAMA_AVX512_VNNI)
add_compile_options(-mavx512vnni) add_compile_options(-mavx512vnni)
endif() endif()
endif() endif()
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
message(STATUS "PowerPC detected") message(STATUS "PowerPC detected")
add_compile_options(-mcpu=native -mtune=native) add_compile_options(-mcpu=native -mtune=native)
#TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
# TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
else() else()
message(STATUS "Unknown architecture") message(STATUS "Unknown architecture")
endif() endif()
@@ -392,20 +423,19 @@ endif()
# #
# Build libraries # Build libraries
# #
add_library(ggml OBJECT add_library(ggml OBJECT
ggml.c ggml.c
ggml.h ggml.h
${GGML_SOURCES_CUDA} ${GGML_SOURCES_CUDA}
${GGML_SOURCES_OPENCL} ${GGML_SOURCES_OPENCL}
${GGML_SOURCES_METAL} ${GGML_SOURCES_METAL}
) )
target_include_directories(ggml PUBLIC .) target_include_directories(ggml PUBLIC .)
target_compile_features(ggml PUBLIC c_std_11) # don't bump target_compile_features(ggml PUBLIC c_std_11) # don't bump
target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS}) target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
if (BUILD_SHARED_LIBS) if(BUILD_SHARED_LIBS)
set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON) set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
endif() endif()
@@ -413,38 +443,37 @@ add_library(llama
llama.cpp llama.cpp
llama.h llama.h
llama-util.h llama-util.h
) llama-jni.cpp
)
target_include_directories(llama PUBLIC .) target_include_directories(llama PUBLIC .)
target_compile_features(llama PUBLIC cxx_std_11) # don't bump target_compile_features(llama PUBLIC cxx_std_11) # don't bump
target_link_libraries(llama PRIVATE target_link_libraries(llama PRIVATE
ggml ggml
${LLAMA_EXTRA_LIBS} ${LLAMA_EXTRA_LIBS}
) )
if (BUILD_SHARED_LIBS) if(BUILD_SHARED_LIBS)
set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON) set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD) target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD)
endif() endif()
if (GGML_SOURCES_CUDA) if(GGML_SOURCES_CUDA)
message(STATUS "GGML CUDA sources found, configuring CUDA architecture") message(STATUS "GGML CUDA sources found, configuring CUDA architecture")
set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES OFF) set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES OFF)
set_property(TARGET ggml PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto") set_property(TARGET ggml PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF) set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF)
endif() endif()
# #
# programs, examples and tests # programs, examples and tests
# #
if(LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
include(CTest) include(CTest)
add_subdirectory(tests) add_subdirectory(tests)
endif () endif()
if (LLAMA_BUILD_EXAMPLES) if(LLAMA_BUILD_EXAMPLES)
add_subdirectory(examples) add_subdirectory(examples)
add_subdirectory(pocs) add_subdirectory(pocs)
endif() endif()

12
llama-jni.cpp Normal file
View file

@@ -0,0 +1,12 @@
// JNI bridge exposing llama.cpp backend initialization to Java/Kotlin.
// The exported symbol name follows the JNI convention for
// com.layla.LlamaCpp.llama_init_backend() — "_1" is the JNI escape
// sequence for '_' inside the Java method name.
#include <jni.h>
#include "llama.h"
//
// Created by gcpth on 05/06/2023.
//
// Called as a static method on com.layla.LlamaCpp; `clazz` is the class
// object and neither JNI parameter is used. Simply forwards to
// llama_init_backend() declared in llama.h.
// NOTE(review): llama_init_backend() is presumably safe to call more than
// once and expected before any model load — confirm against llama.h docs.
extern "C"
JNIEXPORT void JNICALL
Java_com_layla_LlamaCpp_llama_1init_1backend(JNIEnv *env, jclass clazz) {
    llama_init_backend();
}