From eaff11ca55d56aeec10ab4e88085113be71c4b61 Mon Sep 17 00:00:00 2001 From: hiepxanh Date: Mon, 4 Mar 2024 21:12:50 -0800 Subject: [PATCH] readme: add convert-hf-to-gguf.py in example I'm using a Hugging Face model and found out that I need to use a different script, so I'm making this change to save people time instead of having them read the code or open a new issue. --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index f754022de..3a23fe3f1 100644 --- a/README.md +++ b/README.md @@ -698,6 +698,9 @@ python convert.py models/mymodel/ --vocab-type bpe # update the gguf filetype to current version if older version is now unsupported ./quantize ./models/mymodel/ggml-model-Q4_K_M.gguf ./models/mymodel/ggml-model-Q4_K_M-v2.gguf COPY + +# convert the Hugging Face model to GGUF FP16 format +python3 convert-hf-to-gguf.py models/mymodel/ ``` ### Run the quantized model