diff --git a/llama.cpp b/llama.cpp
index e16810456..2b79e871c 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3003,7 +3003,7 @@ struct llama_model_loader {
             const int tensor_idx = gguf_find_tensor(gguf_ctx, name);
             offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
 
-            if (offs + ggml_nbytes(tensor) > file->size) {
+            if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
                 throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name));
            }
        }
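
The added `offs + ggml_nbytes(tensor) < offs` comparison guards against unsigned wraparound: if a corrupted GGUF header reports a huge tensor size, the sum can overflow `size_t` and land below `file->size`, so the old bounds check alone would pass. Below is a minimal standalone sketch (not llama.cpp code; the helper name `tensor_in_bounds` and the sample values are illustrative) showing why the overflow test has to come first:

    #include <cstdint>
    #include <cstdio>

    // Illustrative only: with unsigned arithmetic, offs + nbytes can wrap to a
    // small value, so a plain "end > file_size" check would falsely accept a
    // corrupted tensor. Checking "end < offs" first detects the wraparound.
    static bool tensor_in_bounds(size_t offs, size_t nbytes, size_t file_size) {
        const size_t end = offs + nbytes;   // may wrap on overflow
        if (end < offs) { return false; }   // overflow: sum wrapped past SIZE_MAX
        return end <= file_size;            // ordinary bounds check
    }

    int main() {
        const size_t file_size = 4096;
        printf("%d\n", tensor_in_bounds(1024, SIZE_MAX - 512, file_size)); // 0: overflow caught
        printf("%d\n", tensor_in_bounds(1024, 2048,           file_size)); // 1: within the file
        printf("%d\n", tensor_in_bounds(1024, 8192,           file_size)); // 0: past end of file
        return 0;
    }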