From c56e19db4b62b23bce11aea99e36e3b73ffa8d47 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 26 Apr 2024 12:58:07 +0300
Subject: [PATCH] lint : fix whitespaces

---
 convert-hf-to-gguf.py | 2 +-
 llama.cpp             | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 6fbe8067b..f079fcd42 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1315,7 +1315,7 @@ class PersimmonModel(Model):
 @Model.register("LlamaForCausalLM")
 class DeepseekCoderModel(Model):
     model_arch = gguf.MODEL_ARCH.LLAMA
-    
+
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
         head_count = self.hparams["num_attention_heads"]
diff --git a/llama.cpp b/llama.cpp
index 143f62581..e3a32cf7e 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4247,7 +4247,7 @@ static void llm_load_vocab(
                 vocab.type = LLAMA_VOCAB_TYPE_DEEPSEEKCODER;
             } else if (tokenizer_name == "deepseek_llm") {
                 vocab.type = LLAMA_VOCAB_TYPE_DEEPSEEKLLM;
-            } else { 
+            } else {
                 LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
                 LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
                 vocab.type = LLAMA_VOCAB_TYPE_SPM;
@@ -11812,7 +11812,7 @@ static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
             return vocab.token_to_id.at(buf2);
         }
         case LLAMA_VOCAB_TYPE_WPM:
-        case LLAMA_VOCAB_TYPE_DEEPSEEKCODER: 
+        case LLAMA_VOCAB_TYPE_DEEPSEEKCODER:
         case LLAMA_VOCAB_TYPE_BPE: {
             return vocab.token_to_id.at(unicode_byte_to_utf8(ch));
         }