diff --git a/.gitignore b/.gitignore
index 3087b0ea5..a50bd8403 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
 .vscode/
 .DS_Store
 
+.build/
 build/
 build-em/
 build-debug/
@@ -25,3 +26,7 @@ compile_commands.json
 
 .envrc
 .direnv/
+
+.venv
+__pycache__
+.swiftpm
diff --git a/Package.swift b/Package.swift
new file mode 100644
index 000000000..de0b66804
--- /dev/null
+++ b/Package.swift
@@ -0,0 +1,20 @@
+// swift-tools-version:5.3
+
+import PackageDescription
+
+let package = Package(
+    name: "llama",
+    products: [
+        .library(name: "llama", targets: ["llama"]),
+    ],
+    targets: [
+        .target(
+            name: "llama",
+            path: ".",
+            exclude: ["main.cpp", "tests", "quantize.cpp"],
+            publicHeadersPath: "spm-headers",
+            cSettings: [.unsafeFlags(["-Wno-shorten-64-to-32"])]
+        ),
+    ],
+    cxxLanguageStandard: .cxx11
+)
diff --git a/llama.h b/llama.h
index 3df9ed1fd..1afb2e0a3 100644
--- a/llama.h
+++ b/llama.h
@@ -117,7 +117,7 @@ extern "C" {
 
     // TODO: improve the last_n_tokens interface ?
     LLAMA_API llama_token llama_sample_top_p_top_k(
-              llama_context * ctx,
+       struct llama_context * ctx,
         const llama_token * last_n_tokens_data,
                        int   last_n_tokens_size,
                        int   top_k,
diff --git a/spm-headers/llama.h b/spm-headers/llama.h
new file mode 120000
index 000000000..9acceb980
--- /dev/null
+++ b/spm-headers/llama.h
@@ -0,0 +1 @@
+../llama.h
\ No newline at end of file
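
The Package.swift added above builds the repository in place as a single "llama" target and exposes the header symlinked under spm-headers/ as the public interface; the llama.h hunk adds the "struct" keyword because the header must compile as plain C when imported through the generated module map. As a rough illustration only (not part of this patch), a downstream SwiftPM project could consume the library roughly as follows; the repository URL, branch requirement, and the "llama-demo" target name are placeholders:

    // Package.swift of a hypothetical consumer (URL, branch, and names are placeholders)
    // swift-tools-version:5.3
    import PackageDescription

    let package = Package(
        name: "llama-demo",
        dependencies: [
            // point this at wherever the package defined in this patch is hosted
            .package(name: "llama", url: "https://github.com/ggerganov/llama.cpp", .branch("master")),
        ],
        targets: [
            .target(name: "llama-demo", dependencies: ["llama"]),
        ]
    )

    // main.swift of the consumer: SwiftPM generates a module map from spm-headers/llama.h,
    // so the C API declared there can be imported and called directly.
    import llama
    print(String(cString: llama_print_system_info()))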