From 01d16e1a1efced0cfbe92ed0c94c8003d22dbe54 Mon Sep 17 00:00:00 2001
From: Maximilian Markewitz <77107165+mj-shifu@users.noreply.github.com>
Date: Thu, 27 Jul 2023 20:03:43 +0200
Subject: [PATCH] convert.py : fix of type and shorter code

---
 convert.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/convert.py b/convert.py
index 57915d509..548bd9d3b 100644
--- a/convert.py
+++ b/convert.py
@@ -146,7 +146,7 @@ class Params:
     n_mult: int
     n_head: int
     n_layer: int
-    n_kv_head: int  # This parameter is only used for Llama 2
+    n_kv_head: Optional[int]  # This parameter is only used for Llama 2
 
     @staticmethod
     def guessed(model: 'LazyModel') -> 'Params':
@@ -185,10 +185,7 @@ class Params:
         n_head = config["num_attention_heads"];
         n_layer = config["num_hidden_layers"];
         n_ff = config["intermediate_size"];
-        if "num_key_value_heads" in config:
-            n_kv_head = config["num_key_value_heads"]
-        else:
-            n_kv_head = None
+        n_kv_head = config.get("num_key_value_heads")
 
         n_mult = find_n_mult(n_ff, n_embd);
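
For context, a minimal sketch of the behavior this patch relies on, using a standalone toy config dict rather than the real convert.py surroundings: dict.get() returns None when the key is absent, which makes it equivalent to the removed if/else and is the reason the field's annotation is widened to Optional[int].

    from typing import Optional

    # Hypothetical HF-style config that lacks "num_key_value_heads".
    config: dict = {"num_attention_heads": 32}

    # Old form removed by the patch: explicit membership test.
    if "num_key_value_heads" in config:
        n_kv_head_old: Optional[int] = config["num_key_value_heads"]
    else:
        n_kv_head_old = None

    # New form added by the patch: dict.get() defaults to None for a missing key.
    n_kv_head_new: Optional[int] = config.get("num_key_value_heads")

    # Both forms agree: None here, the int value when the key is present.
    assert n_kv_head_old is None and n_kv_head_new is None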