gfs2: buffered write prefaulting

In gfs2_file_buffered_write, to increase the likelihood that all the
user memory we're trying to write will be resident in memory, carry out
the write in chunks and fault in each chunk of user memory before trying
to write it.  Otherwise, some workloads will trigger frequent short
"internal" writes, causing filesystem blocks to be allocated and then
partially deallocated again when writing into holes, which is wasteful
and breaks reservations.

Neither the chunked writes nor any of the short "internal" writes are
user visible.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
This commit is contained in:
Andreas Gruenbacher 2022-05-04 23:37:30 +02:00
parent 324d116c5a
commit fa5dfa645d
1 changed file with 16 additions and 12 deletions

View File

@@ -778,7 +778,7 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
size_t count = iov_iter_count(i);
size_t size, offs;
if (likely(!count))
if (!count)
return false;
if (!iter_is_iovec(i))
return false;
@@ -1033,7 +1033,20 @@ retry:
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
retry_under_glock:
gfs2_holder_allow_demote(gh);
window_size -= fault_in_iov_iter_readable(from, window_size);
gfs2_holder_disallow_demote(gh);
if (!window_size) {
ret = -EFAULT;
goto out_unlock;
}
if (!gfs2_holder_queued(gh))
goto retry;
from->count = min(from->count, window_size);
}
if (inode == sdp->sd_rindex) {
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -1060,17 +1073,8 @@ retry_under_glock:
goto out_unlock;
from->count = orig_count - written;
if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
gfs2_holder_allow_demote(gh);
window_size -= fault_in_iov_iter_readable(from, window_size);
gfs2_holder_disallow_demote(gh);
if (window_size) {
from->count = min(from->count, window_size);
if (gfs2_holder_queued(gh))
goto retry_under_glock;
goto retry;
}
}
if (should_fault_in_pages(from, iocb, &prev_count, &window_size))
goto retry_under_glock;
out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);