From 9a09e6418f6f60c38d98bf09b82bce22e15c2f1a Mon Sep 17 00:00:00 2001
From: Aniket
Date: Tue, 8 Aug 2023 14:00:05 -0400
Subject: [PATCH] minor spacing update

---
 examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index 5a208a4f2..f7b144eed 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -16,7 +16,7 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-//////////////////////////////////////// llama.c model structs and functions to load models, alloc memory etc.
+//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc.
 typedef struct {
     int dim; // transformer dimension
     int hidden_dim; // for ffn layers
@@ -582,9 +582,7 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
         stuff_karpathy_weights_into_gg(layer.wo            , &w->wo[i*row_length*row_length]);
         stuff_karpathy_weights_into_gg(layer.w1            , &w->w1[i*row_length*n_ff]);
-
         stuff_karpathy_weights_into_gg(layer.w2            , &w->w2[i*n_ff*row_length]);
-
         stuff_karpathy_weights_into_gg(layer.w3            , &w->w3[i*row_length*n_ff]);
     }
 
     // write tensors