[swift] add module omnivlm (#41)

* support omnivlm for iOS
* add Swift test cases
* update test case

Parent: 1487d32b46
Commit: 9201de2b49

5 changed files with 164 additions and 74 deletions
.gitignore (vendored, 1 addition)

@@ -1 +1,2 @@
 build/**
+.build/**
Package.swift (142 changed lines)

@@ -22,68 +22,124 @@ var sources = [
     "ggml/src/ggml-quants.c",
 ]
 
+var omniVlmSources = [
+    "common/log.h",
+    "common/log.cpp",
+    "common/arg.h",
+    "common/arg.cpp",
+    "common/common.cpp",
+    "common/common.h",
+    "common/json.hpp",
+    "common/json-schema-to-grammar.cpp",
+    "common/json-schema-to-grammar.h",
+    "src/llama-grammar.h",
+    "common/grammar-parser.cpp",
+    "common/grammar-parser.h",
+    "common/sampling.cpp",
+    "common/sampling.h",
+    "examples/omni-vlm/build-info.cpp",
+    "examples/omni-vlm/clip.cpp",
+    "examples/omni-vlm/clip.h",
+    "examples/omni-vlm/omni-vlm-wrapper.cpp",
+    "examples/omni-vlm/omni-vlm-wrapper.h",
+    "examples/omni-vlm/omni-vlm.h",
+    "examples/omni-vlm/omni-vlm.cpp",
+    "common/base64.cpp",
+    "ggml/include/ggml.h",
+    "ggml/include/ggml-alloc.h",
+    "ggml/include/ggml-backend.h",
+    "ggml/src/ggml-common.h",
+]
+
+var testSources = [
+    "swift/LlavaTests/LlavaTests.swift"
+]
+
 var resources: [Resource] = []
 var linkerSettings: [LinkerSetting] = []
 var cSettings: [CSetting] = [
     .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
     .unsafeFlags(["-fno-objc-arc"]),
+    .headerSearchPath("."),
     .headerSearchPath("ggml/src"),
-    // NOTE: NEW_LAPACK will required iOS version 16.4+
-    // We should consider add this in the future when we drop support for iOS 14
-    // (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
-    // .define("ACCELERATE_NEW_LAPACK"),
-    // .define("ACCELERATE_LAPACK_ILP64")
+    .headerSearchPath("common"),
+    .unsafeFlags(["-framework", "Foundation"]),
+    .unsafeFlags(["-framework", "Accelerate"]),
 ]
 
-#if canImport(Darwin)
-sources.append("ggml/src/ggml-common.h")
-sources.append("ggml/src/ggml-metal/ggml-metal.m")
-resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
-linkerSettings.append(.linkedFramework("Accelerate"))
-cSettings.append(
-    contentsOf: [
-        .define("GGML_USE_ACCELERATE"),
-        .define("GGML_USE_METAL")
-    ]
-)
-#endif
-
 #if os(Linux)
     cSettings.append(.define("_GNU_SOURCE"))
 #endif
 
-let package = Package(
-    name: "llama",
-    platforms: [
-        .macOS(.v12),
-        .iOS(.v14),
-        .watchOS(.v4),
-        .tvOS(.v14)
-    ],
-    products: [
-        .library(name: "llama", targets: ["llama"]),
-    ],
-    targets: [
-        .target(
-            name: "llama",
-            path: ".",
-            exclude: [
-                "build",
-                "cmake",
-                "examples",
-                "scripts",
-                "models",
-                "tests",
-                "CMakeLists.txt",
-                "Makefile",
-                "ggml/src/ggml-metal-embed.metal"
-            ],
-            sources: sources,
-            resources: resources,
-            publicHeadersPath: "spm-headers",
-            cSettings: cSettings,
-            linkerSettings: linkerSettings
-        )
-    ],
-    cxxLanguageStandard: .cxx11
+let baseSettings = cSettings + [
+    .headerSearchPath("."),
+    .headerSearchPath("src"),
+    .headerSearchPath("common"),
+    .headerSearchPath("examples/omni-vlm"),
+    .headerSearchPath("ggml/include"),
+]
+
+let llamaTarget = Target.target(
+    name: "llama",
+    dependencies: [],
+    path: ".",
+    exclude: [
+        "build", "cmake", "examples", "scripts", "models",
+        "tests", "CMakeLists.txt", "Makefile",
+    ],
+    sources: sources,
+    resources: resources,
+    publicHeadersPath: "spm-headers",
+    cSettings: cSettings,
+    linkerSettings: linkerSettings
+)
+
+let omnivlmTarget = Target.target(
+    name: "omnivlm",
+    dependencies: ["llama"],
+    path: ".",
+    sources: omniVlmSources,
+    resources: resources,
+    publicHeadersPath: "spm/omnivlm",
+    cSettings: baseSettings + [
+        .headerSearchPath("ggml/src"),
+    ],
+    cxxSettings: [.unsafeFlags(["-std=c++14"])],
+    linkerSettings: linkerSettings
+)
+
+let testTarget = Target.testTarget(
+    name: "LlavaTests",
+    dependencies: ["omnivlm"],
+    path: ".",
+    sources: testSources,
+    resources: resources,
+    cSettings: baseSettings + [
+        .headerSearchPath("ggml/src"),
+    ],
+    linkerSettings: linkerSettings
+)
+
+let supportedPlatforms: [SupportedPlatform] = [
+    .macOS(.v12),
+    .iOS(.v14),
+    .watchOS(.v4),
+    .tvOS(.v14)
+]
+
+let products = [
+    Product.library(name: "llama", targets: ["llama"]),
+    Product.library(name: "omnivlm", targets: ["omnivlm"])
+]
+
+let package = Package(
+    name: "llama",
+    platforms: supportedPlatforms,
+    products: products,
+    targets: [
+        llamaTarget,
+        omnivlmTarget,
+        testTarget
+    ],
+    cxxLanguageStandard: .cxx14
 )
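For context, a downstream app could pull in the new "omnivlm" library product roughly as follows. This is a minimal sketch, not part of the commit: the repository URL, branch, and demo target name are placeholders for wherever this fork is hosted.

// swift-tools-version:5.5
// Package.swift of a hypothetical consumer app (illustrative only).
import PackageDescription

let package = Package(
    name: "OmniVlmDemo",
    platforms: [.iOS(.v14), .macOS(.v12)],
    dependencies: [
        // Placeholder URL/branch: point at the fork that contains this commit.
        .package(url: "https://example.com/your-fork/llama.cpp.git", branch: "main")
    ],
    targets: [
        .executableTarget(
            name: "OmniVlmDemo",
            dependencies: [
                // "omnivlm" is the library product added by this commit.
                .product(name: "omnivlm", package: "llama.cpp")
            ]
        )
    ]
)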
examples/omni-vlm/build-info.cpp (new file, 5 additions)

@@ -0,0 +1,5 @@
+// src/build-info.cpp
+int LLAMA_BUILD_NUMBER = 0;
+const char* LLAMA_BUILD_TARGET = "ios-arm64";
+const char* LLAMA_COMMIT = "unknown";
+const char* LLAMA_COMPILER = "clang";
spm/omnivlm/omni-vlm-wrapper.h (new symbolic link, 1 addition)

@@ -0,0 +1 @@
+../../examples/omni-vlm/omni-vlm-wrapper.h
swift/LlavaTests/LlavaTests.swift (new file, 27 additions)

@@ -0,0 +1,27 @@
+import XCTest
+@testable import omnivlm
+
+final class LlavaTests: XCTestCase {
+    func testOmniVlm() {
+        omnivlm_init("model-q4_0.gguf",
+                     "projector-q4_0.gguf",
+                     "vlm-81-instruct")
+
+        let startTime = Date()
+
+        if let cString = omnivlm_inference("describe the image", "cat.png") {
+            let res = String(cString: cString)
+            print("res: \(res)")
+
+            let endTime = Date()
+            let inferenceTime = endTime.timeIntervalSince(startTime)
+            print("Inference time: \(inferenceTime) seconds")
+
+            XCTAssertFalse(res.isEmpty, "res should not be null")
+        } else {
+            XCTFail("failed")
+        }
+
+        omnivlm_free()
+    }
+}
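Outside of XCTest, the same wrapper calls exercised by this test could be driven from app code along these lines. This is a rough sketch under the assumption that the app supplies its own GGUF model, projector, and image paths; it uses only omnivlm_init, omnivlm_inference, and omnivlm_free as they appear in the test above.

import Foundation
import omnivlm  // omni-vlm-wrapper.h is exposed via the target's publicHeadersPath

/// Runs one prompt + image through the omnivlm wrapper and returns the response text.
/// The model, projector, and image paths here are placeholders, not files shipped by this commit.
func describeImage(prompt: String, imagePath: String) -> String? {
    omnivlm_init("model-q4_0.gguf", "projector-q4_0.gguf", "vlm-81-instruct")
    defer { omnivlm_free() }  // release the native context even on early return

    guard let cString = omnivlm_inference(prompt, imagePath) else { return nil }
    return String(cString: cString)
}

// Example:
// let caption = describeImage(prompt: "describe the image", imagePath: "cat.png")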