quantize : fail fast on write errors

Cebtenzzre 2023-10-06 23:11:49 -04:00
parent 48edda30ee
commit 87a35470e0


@@ -6639,6 +6639,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     }
     std::ofstream fout(fname_out, std::ios::binary);
+    fout.exceptions(std::ofstream::failbit); // fail fast on write errors
     const size_t meta_size = gguf_get_meta_size(ctx_out);
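
The added line puts failbit in the stream's exception mask, so any failed write on the quantized output file throws std::ios_base::failure immediately instead of silently setting an error flag. Below is a minimal standalone sketch (not part of the commit) of that pattern; the output path is hypothetical and chosen so the failure is easy to trigger.

// sketch: enabling failbit exceptions on an ofstream so errors fail fast
#include <fstream>
#include <iostream>
#include <vector>

int main() {
    try {
        // Hypothetical path in a directory that does not exist, so the open fails.
        std::ofstream fout("/nonexistent-dir/out.bin", std::ios::binary);

        // Same pattern as the commit: once failbit is in the exception mask,
        // any failed stream operation throws std::ios_base::failure. If failbit
        // is already set (e.g. the open above failed), this call itself throws.
        fout.exceptions(std::ofstream::failbit);

        std::vector<char> buf(1024, 0);
        fout.write(buf.data(), buf.size()); // a failed write would also throw here
    } catch (const std::ios_base::failure & e) {
        std::cerr << "write error: " << e.what() << std::endl;
        return 1;
    }
    return 0;
}

Without the exceptions() call, a write to a broken stream (full disk, unwritable path) is silently recorded in the stream state and the quantization loop would keep going; with it, the error surfaces at the first failed operation.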