Merge 18f15b104c into d7b31a9d84
commit c9daaf8ffe

3 changed files with 92 additions and 38 deletions
.github/workflows/build.yml (vendored): 37 changes
@@ -660,24 +660,10 @@ jobs:
       run: |
         brew update
 
-      - name: Build llama.cpp with CMake
-        id: cmake_build
-        run: |
-          sysctl -a
-          cmake -B build -G Xcode \
-            -DGGML_METAL_USE_BF16=ON \
-            -DGGML_METAL_EMBED_LIBRARY=ON \
-            -DLLAMA_BUILD_EXAMPLES=OFF \
-            -DLLAMA_BUILD_TESTS=OFF \
-            -DLLAMA_BUILD_SERVER=OFF \
-            -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
-          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
-          sudo cmake --install build --config Release
-
       - name: xcodebuild for swift package
         id: xcodebuild
         run: |
-          xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}"
+          xcodebuild -scheme llama -destination "${{ matrix.destination }}"
 
   windows-msys2:
     runs-on: windows-latest
@@ -1284,27 +1270,6 @@ jobs:
       - name: Checkout code
        uses: actions/checkout@v4
 
-      - name: Build
-        id: cmake_build
-        run: |
-          sysctl -a
-          cmake -B build -G Xcode \
-            -DGGML_METAL_USE_BF16=ON \
-            -DGGML_METAL_EMBED_LIBRARY=ON \
-            -DLLAMA_BUILD_EXAMPLES=OFF \
-            -DLLAMA_BUILD_TESTS=OFF \
-            -DLLAMA_BUILD_SERVER=OFF \
-            -DCMAKE_SYSTEM_NAME=iOS \
-            -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
-            -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
-          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
-          sudo cmake --install build --config Release
-
-      - name: xcodebuild for swift package
-        id: xcodebuild
-        run: |
-          xcodebuild -scheme llama-Package -destination 'generic/platform=iOS'
-
       - name: Build Xcode project
         run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build
 
Package.swift: 92 changes

@@ -2,6 +2,74 @@
 
 import PackageDescription
 
+var sources = [
+    "src/llama.cpp",
+    "src/llama-arch.cpp",
+    "src/llama-vocab.cpp",
+    "src/llama-grammar.cpp",
+    "src/llama-sampling.cpp",
+    "src/llama-context.cpp",
+    "src/llama-cparams.cpp",
+    "src/llama-hparams.cpp",
+    "src/llama-model.cpp",
+    "src/llama-chat.cpp",
+    "src/llama-adapter.cpp",
+    "src/llama-batch.cpp",
+    "src/llama-grammar.cpp",
+    "src/llama-mmap.cpp",
+    "src/llama-model-loader.cpp",
+    "src/llama-quant.cpp",
+    "src/llama-impl.cpp",
+    "src/llama-kv-cache.cpp",
+    "src/unicode.cpp",
+    "src/unicode-data.cpp",
+    "ggml/src/gguf.cpp",
+    "ggml/src/ggml.c",
+    "ggml/src/ggml-alloc.c",
+    "ggml/src/ggml-backend.cpp",
+    "ggml/src/ggml-backend-reg.cpp",
+    "ggml/src/ggml-cpu/ggml-cpu.c",
+    "ggml/src/ggml-cpu/ggml-cpu.cpp",
+    "ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
+    "ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
+    "ggml/src/ggml-cpu/ggml-cpu-quants.c",
+    "ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
+    "ggml/src/ggml-threading.cpp",
+    "ggml/src/ggml-quants.c",
+]
+
+var resources: [Resource] = []
+var linkerSettings: [LinkerSetting] = []
+var cSettings: [CSetting] = [
+    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
+    .unsafeFlags(["-fno-objc-arc"]),
+    .headerSearchPath("ggml/src"),
+    .headerSearchPath("ggml/src/ggml-cpu"),
+    // NOTE: NEW_LAPACK will require iOS version 16.4+
+    // We should consider adding this in the future when we drop support for iOS 14
+    // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
+    // .define("ACCELERATE_NEW_LAPACK"),
+    // .define("ACCELERATE_LAPACK_ILP64")
+    .define("GGML_USE_CPU"),
+]
+
+#if canImport(Darwin)
+sources.append("ggml/src/ggml-common.h")
+sources.append("ggml/src/ggml-metal/ggml-metal.m")
+resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
+linkerSettings.append(.linkedFramework("Accelerate"))
+cSettings.append(
+    contentsOf: [
+        .define("GGML_USE_ACCELERATE"),
+        .define("GGML_USE_METAL"),
+    ]
+)
+#endif
+
+#if os(Linux)
+    cSettings.append(.define("_GNU_SOURCE"))
+#endif
+
 let package = Package(
     name: "llama",
     platforms: [
@@ -14,6 +82,26 @@ let package = Package(
         .library(name: "llama", targets: ["llama"]),
     ],
     targets: [
-        .systemLibrary(name: "llama", pkgConfig: "llama"),
-    ]
+        .target(
+            name: "llama",
+            path: ".",
+            exclude: [
+                "build",
+                "cmake",
+                "examples",
+                "scripts",
+                "models",
+                "tests",
+                "CMakeLists.txt",
+                "Makefile",
+                "ggml/src/ggml-metal-embed.metal"
+            ],
+            sources: sources,
+            resources: resources,
+            publicHeadersPath: "spm-headers",
+            cSettings: cSettings,
+            linkerSettings: linkerSettings
+        )
+    ],
+    cxxLanguageStandard: .cxx17
 )
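Because the manifest now compiles the C/C++ sources as a regular .target instead of routing through a .systemLibrary that expects a pkg-config-installed llama, downstream projects can depend on the package directly with SwiftPM. A minimal consumer manifest might look like the sketch below; the repository URL, branch, app name, and platform versions are illustrative assumptions, not part of this diff.

// swift-tools-version:5.5
import PackageDescription

// Hypothetical consumer manifest; URL, branch, and target names are assumptions.
let package = Package(
    name: "MyLlamaApp",
    platforms: [.macOS(.v12), .iOS(.v14)],
    dependencies: [
        .package(url: "https://github.com/ggerganov/llama.cpp", branch: "master"),
    ],
    targets: [
        .executableTarget(
            name: "MyLlamaApp",
            // "llama" is the library product declared in the manifest above.
            dependencies: [.product(name: "llama", package: "llama.cpp")]
        ),
    ]
)

With such a manifest, swift build resolves the dependency and compiles llama.cpp from source, so no prior cmake install step is needed on the consumer side.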
spm-headers/gguf.h (symbolic link): 1 change

@@ -0,0 +1 @@
+../ggml/include/gguf.h
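The new symlink places gguf.h next to the other headers under spm-headers, the target's publicHeadersPath, so its C declarations should become visible to Swift code that imports the module. A rough smoke test, assuming gguf_init_empty() and gguf_free() keep their signatures from gguf.h:

import llama

// Sketch only: create and immediately free an empty GGUF context through the
// re-exported C API (gguf_init_empty / gguf_free are assumed from gguf.h).
if let ctx = gguf_init_empty() {
    gguf_free(ctx)
}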