diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index d205367b3..52c9c0eb4 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1747,18 +1747,18 @@ struct train_params {
     int32_t lora_r;
     int32_t lora_alpha;
 
-    int n_rank_attention_norm;
-    int n_rank_wq;
-    int n_rank_wk;
-    int n_rank_wv;
-    int n_rank_wo;
-    int n_rank_ffn_norm;
-    int n_rank_w1;
-    int n_rank_w2;
-    int n_rank_w3;
-    int n_rank_tok_embeddings;
-    int n_rank_norm;
-    int n_rank_output;
+    uint32_t n_rank_attention_norm;
+    uint32_t n_rank_wq;
+    uint32_t n_rank_wk;
+    uint32_t n_rank_wv;
+    uint32_t n_rank_wo;
+    uint32_t n_rank_ffn_norm;
+    uint32_t n_rank_w1;
+    uint32_t n_rank_w2;
+    uint32_t n_rank_w3;
+    uint32_t n_rank_tok_embeddings;
+    uint32_t n_rank_norm;
+    uint32_t n_rank_output;
 
     bool samples_start_after_nl;
     bool use_adam;
diff --git a/ggml.c b/ggml.c
index d7dc3cb44..e9fdb0d10 100644
--- a/ggml.c
+++ b/ggml.c
@@ -16476,7 +16476,7 @@ static struct ggml_tensor * ggml_recompute_graph_node(
     ggml_format_name(clone, "%s (clone)", ggml_get_name(node));
 
     return clone;
-};
+}
 
 void ggml_build_backward_gradient_checkpointing(
         struct ggml_context * ctx,
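The first hunk widens the LoRA rank fields from plain `int` to the fixed-width unsigned `uint32_t`, presumably to match the unsigned values they are compared with or copied into elsewhere in the finetune example. A minimal standalone sketch of the signed/unsigned hazard this avoids; the struct and values here are hypothetical and not taken from the patch:

```c
/* Hypothetical sketch, not from the patch: shows the sign-conversion
 * issue that plain `int` rank fields can cause when assigned to
 * unsigned fields. Compile with `gcc -std=c11 -Wsign-conversion`. */
#include <stdint.h>
#include <stdio.h>

struct hparams_sketch {        /* stand-in for a struct with unsigned ranks */
    uint32_t n_rank_wq;
};

int main(void) {
    int      rank_signed   = 4;            /* old style: plain int         */
    uint32_t rank_unsigned = 4;            /* new style: fixed-width uint  */
    struct hparams_sketch hp;

    hp.n_rank_wq = (uint32_t) rank_signed; /* without the cast, GCC warns
                                              under -Wsign-conversion      */
    hp.n_rank_wq = rank_unsigned;          /* same-type assignment: clean  */

    printf("n_rank_wq = %u\n", hp.n_rank_wq);
    return 0;
}
```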
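The `ggml.c` hunk drops a stray semicolon after the closing brace of `ggml_recompute_graph_node`. In C, a function definition needs no terminating `;`, so the extra one is an empty top-level declaration that `-Wpedantic` flags. A minimal sketch reproducing the warning; the function names here are made up:

```c
/* Hypothetical sketch, not from ggml.c: compile with
 *   gcc -std=c11 -Wpedantic stray_semicolon.c
 * and GCC warns "ISO C does not allow extra ';' outside of a function"
 * on the first definition; the second compiles cleanly. */
static int with_stray_semicolon(void) {
    return 1;
};  /* extra ';' after the function body -- what the patch removes */

static int without_stray_semicolon(void) {
    return 2;
}   /* no trailing ';' -- the fixed form */

int main(void) {
    return with_stray_semicolon() + without_stray_semicolon();
}
```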