diff --git a/README.md b/README.md index 058919068..7c233b5e1 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ ![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png) [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) -[![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg?branch=master&event=schedule)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml) +[![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml) [![Conan Center](https://shields.io/conan/v/llama-cpp)](https://conan.io/center/llama-cpp) [Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml) diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 2ae2d6d5f..7a39c685b 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -19019,7 +19019,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { FILE * fout = ggml_fopen(fname, "wb"); if (!fout) { - fprintf(stderr, "%s: failed to open %s\n", __func__, fname); + fprintf(stderr, "%s: failed to open %s: %s\n", __func__, fname, strerror(errno)); return; } @@ -19156,7 +19156,7 @@ struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context * { FILE * fin = ggml_fopen(fname, "rb"); if (!fin) { - fprintf(stderr, "%s: failed to open %s\n", __func__, fname); + fprintf(stderr, "%s: failed to open %s: %s\n", __func__, fname, strerror(errno)); return result; } @@ -20830,6 +20830,7 @@ struct gguf_context * gguf_init_empty(void) { struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { FILE * file = ggml_fopen(fname, "rb"); if (!file) { 
+ fprintf(stderr, "%s: failed to open %s: %s\n", __func__, fname, strerror(errno)); return NULL; } diff --git a/gguf-py/gguf/metadata.py b/gguf-py/gguf/metadata.py index be297f242..bac6ebfb3 100644 --- a/gguf-py/gguf/metadata.py +++ b/gguf-py/gguf/metadata.py @@ -62,6 +62,7 @@ class Metadata: # This is based on LLM_KV_NAMES mapping in llama.cpp metadata_override = Metadata.load_metadata_override(metadata_override_path) + metadata.name = metadata_override.get(Keys.General.NAME, metadata.name) metadata.author = metadata_override.get(Keys.General.AUTHOR, metadata.author) metadata.version = metadata_override.get(Keys.General.VERSION, metadata.version) metadata.organization = metadata_override.get(Keys.General.ORGANIZATION, metadata.organization) diff --git a/src/llama.cpp b/src/llama.cpp index 8d2e56080..64cc49149 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -21619,7 +21619,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|assistant|>"; } - } else if (tmpl == "chaglm4" || tmpl_contains("[gMASK]")) { + } else if (tmpl == "chatglm4" || tmpl_contains("[gMASK]")) { ss << "[gMASK]" << ""; for (auto message : chat) { std::string role(message->role);