minor change

This commit is contained in:
oKatanaaa 2023-03-16 22:38:06 +00:00
parent 7252a2b658
commit fc924e9aba

View file

@ -97,6 +97,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
        return false;
    }
// Having a large buffer helps to accelerate load considerably (old buffer was 1024 * 1024).
// Though I am not sure if it's okay for edge devices like Raspberry Pi.
    std::vector<char> f_buf(128 * 1024 * 1024);
    setvbuf(fin, f_buf.data(), _IOFBF, f_buf.size());