From 348acf188c9fbe66396990f2dc83229df367969b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kamil=20Tom=C5=A1=C3=ADk?=
Date: Mon, 14 Aug 2023 15:35:16 +0200
Subject: [PATCH 1/4] llama : add missing enum keyword in function signatures
 (#2610)

---
 llama.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.h b/llama.h
index d237bcc54..92b474891 100644
--- a/llama.h
+++ b/llama.h
@@ -97,7 +97,7 @@ extern "C" {
     // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
     // if it exists.
     // It might not exist for progress report where '.' is output repeatedly.
-    typedef void (*llama_log_callback)(llama_log_level level, const char * text, void * user_data);
+    typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
 
     struct llama_context_params {
         uint32_t seed; // RNG seed, -1 for random
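For context: `llama.h` is consumed from plain C, where the bare tag `llama_log_level` is not a valid type name, hence the explicit `enum` keyword above. A minimal caller-side sketch of a matching callback, assuming registration goes through `llama_log_set`, which the same header declares alongside the typedef:

    #include <stdio.h>
    #include "llama.h"

    // Matches llama_log_callback: plain C requires the explicit `enum` keyword.
    static void my_log_callback(enum llama_log_level level, const char * text, void * user_data) {
        (void) level;
        (void) user_data;
        // `text` usually already ends in '\n' (see the header comment above),
        // so print it verbatim rather than appending another newline.
        fputs(text, stderr);
    }

    int main(void) {
        llama_log_set(my_log_callback, NULL);
        return 0;
    }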
From d75561df207d22790609ee0ad924302f66ac2599 Mon Sep 17 00:00:00 2001
From: Cheng Shao
Date: Mon, 14 Aug 2023 15:36:42 +0200
Subject: [PATCH 2/4] server : add --numa support (#2524)

---
 examples/server/README.md  | 1 +
 examples/server/server.cpp | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/examples/server/README.md b/examples/server/README.md
index e56ca063a..1559dd3f2 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -16,6 +16,7 @@ Command line options:
 - `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
 - `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
 - `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
+- `--numa`: Attempt optimizations that help on some NUMA systems.
 - `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
 - `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
 - `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 2340f93ac..222dbcb43 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -666,6 +666,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     {
         fprintf(stdout, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
+    fprintf(stdout, "  --numa                attempt optimizations that help on some NUMA systems\n");
 #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
     fprintf(stdout, "  -ngl N, --n-gpu-layers N\n");
     fprintf(stdout, "                        number of layers to store in VRAM\n");
@@ -940,6 +941,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         {
             params.use_mmap = false;
         }
+        else if (arg == "--numa")
+        {
+            params.numa = true;
+        }
         else if (arg == "--embedding")
         {
             params.embedding = true;

From d783f7982e0e823a2626a9956359c0d36c1a7e21 Mon Sep 17 00:00:00 2001
From: Jhen-Jie Hong
Date: Mon, 14 Aug 2023 21:37:39 +0800
Subject: [PATCH 3/4] metal : return null instead of exit(1) (#2573)

---
 ggml-metal.m | 6 +++---
 llama.cpp    | 6 ++++++
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/ggml-metal.m b/ggml-metal.m
index b47a98e21..fbac21e3a 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -126,7 +126,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
         ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
         if (error) {
             fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
-            exit(1);
+            return NULL;
         }
     }
 #else
@@ -144,7 +144,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
         NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
         if (error) {
             fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
-            exit(1);
+            return NULL;
         }
 
 #ifdef GGML_QKK_64
@@ -156,7 +156,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
 #endif
         if (error) {
             fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
-            exit(1);
+            return NULL;
         }
     }
 #endif
diff --git a/llama.cpp b/llama.cpp
index 0cf2b3749..c8ab313d9 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3337,6 +3337,12 @@ struct llama_context * llama_new_context_with_model(
         // this allocates all Metal resources and memory buffers
         ctx->ctx_metal = ggml_metal_init(1);
 
+        if (!ctx->ctx_metal) {
+            LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
+            llama_free(ctx);
+            return NULL;
+        }
+
         void * data_ptr  = NULL;
         size_t data_size = 0;
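Since `ggml_metal_init` now reports failure by returning NULL instead of terminating the process, direct users of the Metal backend need to check the result, just as `llama_new_context_with_model` does above. A minimal sketch of that contract, with the fallback policy left to the caller:

    #include <stdio.h>
    #include "ggml-metal.h"

    static struct ggml_metal_context * init_metal_or_report(void) {
        // After this patch, shader-compilation errors surface as a NULL
        // return instead of exit(1), so the caller decides what to do next.
        struct ggml_metal_context * ctx = ggml_metal_init(1 /* n_cb */);
        if (ctx == NULL) {
            fprintf(stderr, "ggml_metal_init() failed\n");
        }
        return ctx;
    }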
From 3ebb00935f3f0522b75df49c2769ab1774b91380 Mon Sep 17 00:00:00 2001
From: Jhen-Jie Hong
Date: Tue, 15 Aug 2023 06:14:14 +0800
Subject: [PATCH 4/4] server : add missing /json-schema-to-grammar.mjs (#2616)

fixes #2611
---
 examples/server/server.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 222dbcb43..99660455a 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -15,6 +15,7 @@
 #include "index.html.hpp"
 #include "index.js.hpp"
 #include "completion.js.hpp"
+#include "json-schema-to-grammar.mjs.hpp"
 
 #ifndef SERVER_VERBOSE
 #define SERVER_VERBOSE 1
@@ -1218,6 +1219,12 @@ int main(int argc, char **argv)
                 res.set_content(reinterpret_cast<const char *>(&completion_js), completion_js_len, "application/javascript");
                 return false; });
 
+    // this is only called if no index.html is found in the public --path
+    svr.Get("/json-schema-to-grammar.mjs", [](const Request &, Response &res)
+            {
+                res.set_content(reinterpret_cast<const char *>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript");
+                return false; });
+
     svr.Post("/completion", [&llama](const Request &req, Response &res)
             {
                 auto lock = llama.lock();
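The new route follows the pattern the server already uses for `index.html.hpp`, `index.js.hpp`, and `completion.js.hpp`: a byte array embedded at build time is served with an explicit MIME type. A standalone sketch of that pattern with cpp-httplib, where `example_mjs` and `example_mjs_len` are hypothetical stand-ins for the symbols the generated `.hpp` files are assumed to provide:

    #include "httplib.h" // cpp-httplib, as vendored by examples/server

    // Hypothetical embedded asset; the generated headers expose the same
    // shape: an unsigned char array plus a length.
    static const unsigned char example_mjs[]   = "export const ok = true;";
    static const unsigned int  example_mjs_len = sizeof(example_mjs) - 1;

    int main() {
        httplib::Server svr;
        // Serve the embedded bytes with a JavaScript MIME type, mirroring
        // the registration the patch adds for /json-schema-to-grammar.mjs.
        svr.Get("/example.mjs", [](const httplib::Request &, httplib::Response & res) {
            res.set_content(reinterpret_cast<const char *>(example_mjs),
                            example_mjs_len, "application/javascript");
        });
        svr.listen("127.0.0.1", 8080);
    }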