xfs: reduce the number of atomic when locking a buffer after lookup
Avoid an extra atomic operation in the non-trylock case by only
doing a trylock if the XBF_TRYLOCK flag is set. This follows the
pattern in the IO path with NOWAIT semantics where the
"trylock-fail-lock" path showed 5-10% reduced throughput compared to
just using a single lock call when not under NOWAIT conditions. So
make that same change here, too.
See commit 942491c9e6 ("xfs: fix AIM7 regression") for details.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
[hch: split from a larger patch]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
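
For context, a minimal sketch of the before/after locking logic the message describes, with the stats accounting omitted. The helper names find_lock_old/find_lock_new are illustrative only; xfs_buf_trylock() and xfs_buf_lock() are the real XFS buffer lock primitives:

/*
 * Sketch only: the real xfs_buf_find_lock() also bumps the
 * xb_busy_locked and xb_get_locked_waited stats counters,
 * as the diff below shows.
 */

/* Old pattern: every caller pays for the trylock atomic first. */
static int find_lock_old(struct xfs_buf *bp, xfs_buf_flags_t flags)
{
	if (!xfs_buf_trylock(bp)) {	/* atomic attempt, even for blocking callers */
		if (flags & XBF_TRYLOCK)
			return -EAGAIN;
		xfs_buf_lock(bp);	/* second lock operation on the contended path */
	}
	return 0;
}

/* New pattern: blocking callers go straight to a single lock call. */
static int find_lock_new(struct xfs_buf *bp, xfs_buf_flags_t flags)
{
	if (flags & XBF_TRYLOCK) {
		if (!xfs_buf_trylock(bp))
			return -EAGAIN;
	} else {
		xfs_buf_lock(bp);	/* no preceding trylock */
	}
	return 0;
}

Only callers that asked for trylock semantics now take the "trylock-fail-lock" path; everyone else acquires the lock with a single call.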
parent 348000804a
commit d8d9bbb0ee

1 changed file with 3 additions and 2 deletions
@@ -534,11 +534,12 @@ xfs_buf_find_lock(
 	struct xfs_buf		*bp,
 	xfs_buf_flags_t		flags)
 {
-	if (!xfs_buf_trylock(bp)) {
-		if (flags & XBF_TRYLOCK) {
+	if (flags & XBF_TRYLOCK) {
+		if (!xfs_buf_trylock(bp)) {
 			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
 			return -EAGAIN;
 		}
+	} else {
 		xfs_buf_lock(bp);
 		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
 	}