diff --git a/llama.cpp b/llama.cpp
index 13310a8df..60e4b529c 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2,8 +2,6 @@
 
 #include "ggml.h"
 
-#include <sys/stat.h>
-
 #include
 #include
 #include
@@ -417,12 +415,11 @@ static bool llama_model_load(
 
         fin = std::ifstream(fname_part, std::ios::binary);
         fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
-        fin.seekg(file_offset);
 
-        // stat the file for file size
-        struct stat st;
-        stat(fname_part.c_str(), &st);
-        const size_t file_size = st.st_size;
+        fin.seekg(0, fin.end);
+        const size_t file_size = fin.tellg();
+
+        fin.seekg(file_offset);
 
         // load weights
         {
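
Note on the pattern above: the patch drops the separate stat() call and instead asks the already-open std::ifstream for its size by seeking to the end and reading the stream position, then seeking back to the part's data offset. The following is a minimal standalone sketch of that idiom, not code from the patch; the helper name file_size_via_stream, the error handling, and the main driver are illustrative assumptions.

    #include <cstdio>
    #include <fstream>
    #include <string>

    // Sketch of the seekg/tellg size query the patch switches to. Everything
    // here other than the seekg/tellg lines is illustrative scaffolding.
    static size_t file_size_via_stream(const std::string & fname) {
        std::ifstream fin(fname, std::ios::binary);
        if (!fin) {
            return 0; // the real loader reports the error and returns false instead
        }
        fin.seekg(0, fin.end);           // seek to end of file ...
        const size_t size = fin.tellg(); // ... and read the position = file size
        fin.seekg(0);                    // rewind before any subsequent reads
        return size;
    }

    int main(int argc, char ** argv) {
        if (argc < 2) {
            std::fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        std::fprintf(stdout, "%s: %zu bytes\n", argv[1], file_size_via_stream(argv[1]));
        return 0;
    }

Because the size is now taken from the same stream handle the loader reads from, no second filesystem query is needed, which is why the hunk at the top of the file can drop an include.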