convert-hf : use GPT2 vocab and ignore sliding_window hparam for Phi-4 model

llama : use regular (not a sliding window) attention mask for Phi-4 model
Stanisław Szymczyk 2024-12-13 18:55:08 +01:00
parent 11e07fd63b
commit 7555ab1cdc
2 changed files with 14 additions and 4 deletions

convert_hf_to_gguf.py

@@ -2129,6 +2129,9 @@ class Phi3MiniModel(Model):
     model_arch = gguf.MODEL_ARCH.PHI3
 
     def set_vocab(self):
+        if self.metadata.name == "Phi 4":
+            return self._set_vocab_gpt2()
+
         from sentencepiece import SentencePieceProcessor
 
         tokenizer_path = self.dir_model / 'tokenizer.model'
@@ -2245,7 +2248,8 @@ class Phi3MiniModel(Model):
         self.gguf_writer.add_rope_dimension_count(rope_dims)
         self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
         self.gguf_writer.add_file_type(self.ftype)
-        self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))
+        if self.metadata.name != "Phi 4":
+            self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))
 
     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         n_embd = self.find_hparam(["hidden_size", "n_embd"])

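A note on the vocab change above: the SentencePiece path that set_vocab falls through to expects a tokenizer.model file, which Phi-3 checkpoints ship; Phi-4 instead ships a GPT-2-style BPE tokenizer, hence the early return into _set_vocab_gpt2(). The commit keys the decision off metadata.name; below is a sketch of an alternative, file-based check (uses_sentencepiece is a hypothetical helper, and the assumption that Phi-4 repositories ship no tokenizer.model is mine, not the commit's):

    from pathlib import Path

    def uses_sentencepiece(model_dir: str) -> bool:
        # SentencePiece checkpoints (e.g. Phi-3) ship a tokenizer.model file;
        # BPE checkpoints (e.g. Phi-4, assumed here) ship tokenizer.json /
        # vocab + merges instead
        return (Path(model_dir) / "tokenizer.model").exists()

Matching on the model name keeps the change tightly scoped to Phi-4, at the cost of depending on the metadata being set; a file-based check would generalize, but could misfire on repos that ship both tokenizer formats.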
src/llama.cpp

@@ -5807,7 +5807,7 @@ static void llm_load_hparams(
                    hparams.n_swa = 131072;
                }
                bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
-               if (!found_swa && hparams.n_swa == 0) {
+               if (!found_swa && hparams.n_swa == 0 && model.name != "Phi 4") {
                    throw std::runtime_error("invalid value for sliding_window");
                }
            } break;
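Because the converter now skips add_sliding_window() for Phi-4, the phi3.attention.sliding_window key (the GGUF spelling of LLM_KV_ATTENTION_SLIDING_WINDOW for this architecture, assuming the usual {arch}.attention.sliding_window naming) is simply absent from the converted file, which is why the loader above must stop treating the missing key as fatal for this model. A minimal sketch to confirm the key's absence, assuming the gguf Python package and a hypothetical output file name:

    from gguf import GGUFReader

    reader = GGUFReader("phi-4.gguf")  # hypothetical output of convert_hf_to_gguf.py
    field = reader.get_field("phi3.attention.sliding_window")
    print("sliding_window key present:", field is not None)  # expected: False for Phi-4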
@@ -12839,7 +12839,13 @@ struct llm_build_context {
         struct ggml_tensor * inp_pos = build_inp_pos();
 
         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
+        struct ggml_tensor * KQ_mask = nullptr;
+        if (model.name == "Phi 4") {
+            // Phi-4 doesn't use sliding window attention
+            KQ_mask = build_inp_KQ_mask();
+        } else {
+            KQ_mask = build_inp_KQ_mask_swa();
+        }
 
         for (int il = 0; il < n_layer; ++il) {
             auto residual = inpL;
@@ -12897,7 +12903,7 @@ struct llm_build_context {
                 cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                         model.layers[il].wo, model.layers[il].bo,
-                        Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
             }
 
             if (il == n_layer - 1) {
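For reference, the two graph inputs selected above differ only in the mask they build: build_inp_KQ_mask() yields a regular causal mask, while build_inp_KQ_mask_swa() additionally masks tokens older than n_swa positions. A small numpy sketch of the two shapes (the exact off-by-one convention at the window edge is an assumption, not lifted from llama.cpp):

    import numpy as np

    def causal_mask(n_tokens: int) -> np.ndarray:
        # 0.0 = may attend, -inf = masked; token i attends to tokens 0..i
        m = np.full((n_tokens, n_tokens), -np.inf)
        for i in range(n_tokens):
            m[i, : i + 1] = 0.0
        return m

    def swa_mask(n_tokens: int, n_swa: int) -> np.ndarray:
        # causal, but token i additionally drops tokens j with i - j >= n_swa
        m = causal_mask(n_tokens)
        for i in range(n_tokens):
            m[i, : max(0, i - n_swa + 1)] = -np.inf
        return m

With this commit, Phi-4 always gets the causal variant, so n_swa never enters the mask for that model.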