locking/percpu-rwsem: Remove preempt_disable variants

Effectively revert commit:

  87709e28dc ("fs/locks: Use percpu_down_read_preempt_disable()")

This is causing major pain for PREEMPT_RT.

Sebastian did a lot of lockperf runs on 2 and 4 node machines with all
preemption modes (PREEMPT=n should be an obvious NOP for this patch
and thus serves as a good control) and no results showed significance
over 2-sigma (the PREEMPT=n results were almost empty at 1-sigma).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 02e525b2af
parent 0614621d89
Author:    Peter Zijlstra, 2019-02-21 15:38:40 +01:00
Committer: Ingo Molnar
2 changed files with 20 additions and 36 deletions
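Before the hunks, a caller-side illustration. This sketch is not part of the patch: the percpu-rwsem and spinlock calls are the ones this commit touches, but example_rwsem, example_lock and the two example_*() functions are invented stand-ins for file_rwsem and ctx->flc_lock in the fs/locks.c hunks below.

  #include <linux/percpu-rwsem.h>
  #include <linux/spinlock.h>

  /* Invented stand-ins for file_rwsem and ctx->flc_lock. */
  DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);
  static DEFINE_SPINLOCK(example_lock);

  /* Old calling convention, removed by this patch: the percpu-rwsem read
   * side is taken with preemption disabled until the matching
   * percpu_up_read_preempt_enable(). */
  static void example_read_side_old(void)
  {
  	percpu_down_read_preempt_disable(&example_rwsem);
  	spin_lock(&example_lock);
  	/* ... work under both locks, preemption off the whole time ... */
  	spin_unlock(&example_lock);
  	percpu_up_read_preempt_enable(&example_rwsem);
  }

  /* New calling convention: plain percpu_down_read()/percpu_up_read();
   * the rwsem no longer keeps preemption disabled across the section. */
  static void example_read_side_new(void)
  {
  	percpu_down_read(&example_rwsem);
  	spin_lock(&example_lock);
  	/* ... same work; preemption is no longer disabled by the rwsem ... */
  	spin_unlock(&example_lock);
  	percpu_up_read(&example_rwsem);
  }

As the include/linux/percpu-rwsem.h hunks further down show, percpu_down_read()/percpu_up_read() still pair preempt_disable()/preempt_enable() internally, but only around their own per-CPU bookkeeping rather than around the caller's entire critical section.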

diff --git a/fs/locks.c b/fs/locks.c
--- a/fs/locks.c
+++ b/fs/locks.c

@@ -1058,7 +1058,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 			return -ENOMEM;
 	}

-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
@@ -1100,7 +1100,7 @@ find_conflict:

 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	if (new_fl)
 		locks_free_lock(new_fl);
 	locks_dispose_list(&dispose);
@@ -1138,7 +1138,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 		new_fl2 = locks_alloc_lock();
 	}

-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	/*
 	 * New lock request. Walk all POSIX locks and look for conflicts. If
@@ -1312,7 +1312,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 	}
  out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	/*
 	 * Free any unused locks.
 	 */
@@ -1584,7 +1584,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 		return error;
 	}

-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);

 	time_out_leases(inode, &dispose);
@@ -1636,13 +1636,13 @@ restart:
 	locks_insert_block(fl, new_fl, leases_conflict);
 	trace_break_lease_block(inode, new_fl);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);

 	locks_dispose_list(&dispose);
 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
 						!new_fl->fl_blocker, break_time);

-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	trace_break_lease_unblock(inode, new_fl);
 	locks_delete_block(new_fl);
@@ -1659,7 +1659,7 @@ restart:
 	}
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	locks_free_lock(new_fl);
 	return error;
@@ -1729,7 +1729,7 @@ int fcntl_getlease(struct file *filp)

 	ctx = smp_load_acquire(&inode->i_flctx);
 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
-		percpu_down_read_preempt_disable(&file_rwsem);
+		percpu_down_read(&file_rwsem);
 		spin_lock(&ctx->flc_lock);
 		time_out_leases(inode, &dispose);
 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1739,7 +1739,7 @@ int fcntl_getlease(struct file *filp)
 			break;
 		}
 		spin_unlock(&ctx->flc_lock);
-		percpu_up_read_preempt_enable(&file_rwsem);
+		percpu_up_read(&file_rwsem);

 		locks_dispose_list(&dispose);
 	}
@@ -1813,7 +1813,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 		return -EINVAL;
 	}

-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
@@ -1884,7 +1884,7 @@ out_setup:
 		lease->fl_lmops->lm_setup(lease, priv);
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	if (is_deleg)
 		inode_unlock(inode);
@@ -1907,7 +1907,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 		return error;
 	}

-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 		if (fl->fl_file == filp &&
@@ -1920,7 +1920,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 	if (victim)
 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	return error;
 }
@@ -2643,13 +2643,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
 	if (list_empty(&ctx->flc_lease))
 		return;

-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
 		if (filp == fl->fl_file)
 			lease_modify(fl, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);

 	locks_dispose_list(&dispose);
 }

diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h

@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);

-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 {
 	might_sleep();

@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
 	__this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
-	barrier();
 	/*
-	 * The barrier() prevents the compiler from
+	 * The preempt_enable() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
-}
-
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
-{
-	percpu_down_read_preempt_disable(sem);
 	preempt_enable();
 }
@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	return ret;
 }

-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-	/*
-	 * The barrier() prevents the compiler from
-	 * bleeding the critical section out.
-	 */
-	barrier();
+	preempt_disable();
 	/*
 	 * Same as in percpu_down_read().
 	 */
@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
 	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
 }

-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
-{
-	preempt_disable();
-	percpu_up_read_preempt_enable(sem);
-}
-
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);