locking/lockdep: Factor out the find_held_lock() helper function

A simple consolidation to factor out a repeated pattern: each of these paths walks the held-lock stack looking for the entry that matches a given lockdep_map.

The behaviour should not change.

Signed-off-by: J. R. Okajima <hooanon05g@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1486053497-9948-1-git-send-email-hooanon05g@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit: 41c2c5b86a (parent: f2c716e4dd)
Author: J. R. Okajima, 2017-02-03 01:38:15 +09:00 (committed by Ingo Molnar)
1 changed file with 54 additions and 60 deletions

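For orientation before the diff: the loop being deduplicated walks the current task's held-lock stack from the top (depth - 1) downwards, refuses to cross an irq_context boundary, and reports both the matching entry and the index at which it was found. Below is a minimal, self-contained userspace sketch of that shape, not the kernel code: the struct definitions, the simplified match check, and the array-based signature are stand-ins for lockdep's real types (the real helper takes a task_struct, and the real match_held_lock() does more than a pointer comparison).

/*
 * Minimal userspace sketch of the pattern this commit factors out.
 * The types below are simplified stand-ins for the kernel's
 * struct held_lock / struct lockdep_map, not the real definitions.
 */
#include <stdio.h>
#include <stddef.h>

struct lockdep_map { const char *name; };

struct held_lock {
	struct lockdep_map *instance;
	int irq_context;
};

static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
{
	/* The kernel check also handles nested subclasses; keep it simple here. */
	return hlock->instance == lock;
}

/*
 * Walk the held-lock stack from the top (depth - 1) downwards, stop at
 * an irq_context boundary, and hand back both the match and the index
 * it was found at -- the shape of the new find_held_lock() helper.
 * @depth must not be zero.
 */
static struct held_lock *find_held_lock(struct held_lock *held_locks,
					struct lockdep_map *lock,
					unsigned int depth, int *idx)
{
	struct held_lock *ret, *hlock, *prev_hlock;
	int i;

	i = depth - 1;
	hlock = held_locks + i;
	ret = hlock;
	if (match_held_lock(hlock, lock))
		goto out;

	ret = NULL;
	for (i--, prev_hlock = hlock--; i >= 0; i--, prev_hlock = hlock--) {
		/* We must not cross into another context: */
		if (prev_hlock->irq_context != hlock->irq_context)
			break;
		if (match_held_lock(hlock, lock)) {
			ret = hlock;
			break;
		}
	}
out:
	*idx = i;
	return ret;
}

int main(void)
{
	struct lockdep_map a = { "a" }, b = { "b" }, c = { "c" };
	struct held_lock stack[] = { { &a, 0 }, { &b, 0 }, { &c, 0 } };
	int idx;
	struct held_lock *hl = find_held_lock(stack, &b, 3, &idx);

	if (hl)
		printf("found %s at index %d\n", hl->instance->name, idx);
	else
		printf("not found (an unlock imbalance in the real code)\n");
	return 0;
}

Compiled with a plain C compiler this prints "found b at index 1"; in the kernel, the NULL return is what lets each caller in the diff below report an unlock-imbalance or lock-contention bug instead of open-coding the scan.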

@@ -3437,13 +3437,49 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	return 0;
 }
 
+/* @depth must not be zero */
+static struct held_lock *find_held_lock(struct task_struct *curr,
+					struct lockdep_map *lock,
+					unsigned int depth, int *idx)
+{
+	struct held_lock *ret, *hlock, *prev_hlock;
+	int i;
+
+	i = depth - 1;
+	hlock = curr->held_locks + i;
+	ret = hlock;
+	if (match_held_lock(hlock, lock))
+		goto out;
+
+	ret = NULL;
+	for (i--, prev_hlock = hlock--;
+	     i >= 0;
+	     i--, prev_hlock = hlock--) {
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock->irq_context != hlock->irq_context) {
+			ret = NULL;
+			break;
+		}
+		if (match_held_lock(hlock, lock)) {
+			ret = hlock;
+			break;
+		}
+	}
+
+out:
+	*idx = i;
+	return ret;
+}
+
 static int
 __lock_set_class(struct lockdep_map *lock, const char *name,
 		 struct lock_class_key *key, unsigned int subclass,
 		 unsigned long ip)
 {
 	struct task_struct *curr = current;
-	struct held_lock *hlock, *prev_hlock;
+	struct held_lock *hlock;
 	struct lock_class *class;
 	unsigned int depth;
 	int i;
@@ -3456,21 +3492,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return 0;
 
-	prev_hlock = NULL;
-	for (i = depth-1; i >= 0; i--) {
-		hlock = curr->held_locks + i;
-		/*
-		 * We must not cross into another context:
-		 */
-		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
-			break;
-		if (match_held_lock(hlock, lock))
-			goto found_it;
-		prev_hlock = hlock;
-	}
-	return print_unlock_imbalance_bug(curr, lock, ip);
+	hlock = find_held_lock(curr, lock, depth, &i);
+	if (!hlock)
+		return print_unlock_imbalance_bug(curr, lock, ip);
 
-found_it:
 	lockdep_init_map(lock, name, key, 0);
 	class = register_lock_class(lock, subclass, 0);
 	hlock->class_idx = class - lock_classes + 1;
@@ -3508,7 +3533,7 @@ static int
 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
 	struct task_struct *curr = current;
-	struct held_lock *hlock, *prev_hlock;
+	struct held_lock *hlock;
 	unsigned int depth;
 	int i;
 
@@ -3527,21 +3552,10 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	 * Check whether the lock exists in the current stack
 	 * of held locks:
 	 */
-	prev_hlock = NULL;
-	for (i = depth-1; i >= 0; i--) {
-		hlock = curr->held_locks + i;
-		/*
-		 * We must not cross into another context:
-		 */
-		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
-			break;
-		if (match_held_lock(hlock, lock))
-			goto found_it;
-		prev_hlock = hlock;
-	}
-	return print_unlock_imbalance_bug(curr, lock, ip);
+	hlock = find_held_lock(curr, lock, depth, &i);
+	if (!hlock)
+		return print_unlock_imbalance_bug(curr, lock, ip);
 
-found_it:
 	if (hlock->instance == lock)
 		lock_release_holdtime(hlock);
 
@@ -3903,7 +3917,7 @@ static void
 __lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
-	struct held_lock *hlock, *prev_hlock;
+	struct held_lock *hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
 	int i, contention_point, contending_point;
@@ -3916,22 +3930,12 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
-	prev_hlock = NULL;
-	for (i = depth-1; i >= 0; i--) {
-		hlock = curr->held_locks + i;
-		/*
-		 * We must not cross into another context:
-		 */
-		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
-			break;
-		if (match_held_lock(hlock, lock))
-			goto found_it;
-		prev_hlock = hlock;
+	hlock = find_held_lock(curr, lock, depth, &i);
+	if (!hlock) {
+		print_lock_contention_bug(curr, lock, ip);
+		return;
 	}
-	print_lock_contention_bug(curr, lock, ip);
-	return;
 
-found_it:
 	if (hlock->instance != lock)
 		return;
 
@@ -3955,7 +3959,7 @@ static void
 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
-	struct held_lock *hlock, *prev_hlock;
+	struct held_lock *hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
 	u64 now, waittime = 0;
@@ -3969,22 +3973,12 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
-	prev_hlock = NULL;
-	for (i = depth-1; i >= 0; i--) {
-		hlock = curr->held_locks + i;
-		/*
-		 * We must not cross into another context:
-		 */
-		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
-			break;
-		if (match_held_lock(hlock, lock))
-			goto found_it;
-		prev_hlock = hlock;
+	hlock = find_held_lock(curr, lock, depth, &i);
+	if (!hlock) {
+		print_lock_contention_bug(curr, lock, _RET_IP_);
+		return;
 	}
-	print_lock_contention_bug(curr, lock, _RET_IP_);
-	return;
 
-found_it:
 	if (hlock->instance != lock)
 		return;
 