lockdep: Use consistent printing primitives
Commit a5dd63efda ("lockdep: Use "WARNING" tag on lockdep splats")
substituted pr_warn() for printk() in places called out by Dmitry Vyukov.
However, this resulted in an ugly mix of pr_warn() and printk(). This
commit therefore changes printk() to pr_warn() or pr_cont(), depending
on the absence or presence of KERN_CONT. This is done in all functions
that had printk() changed to pr_warn() by the aforementioned commit.
Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
parent 2464dd940e
commit 681fbec881

1 changed file with 86 additions and 86 deletions
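The rule the patch applies is mechanical: a printk() that starts a new message becomes pr_warn(), while a printk(KERN_CONT ...) that continues the previous line becomes pr_cont(). A minimal kernel-style sketch of that pattern, not taken from the patch itself (the report_lock_pair() helper is hypothetical; only pr_warn()/pr_cont() come from <linux/printk.h>):

#include <linux/printk.h>

/* Hypothetical helper: print "prev -> next" as one warning-level console line. */
static void report_lock_pair(const char *prev, const char *next)
{
	pr_warn("%s", prev);	/* replaces printk(): starts a new message at warning level */
	pr_cont(" -> ");	/* replaces printk(KERN_CONT ...): continues the same line */
	pr_cont("%s\n", next);
}

Note that continuation output is best-effort: if another CPU logs concurrently, a pr_cont() fragment may still end up on its own line.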
@@ -1157,18 +1157,18 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("======================================================\n");
 	pr_warn("WARNING: possible circular locking dependency detected\n");
 	print_kernel_ident();
 	pr_warn("------------------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(check_src);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(check_tgt);
-	printk("\nwhich lock already depends on the new lock.\n\n");
-	printk("\nthe existing dependency chain (in reverse order) is:\n");
+	pr_warn("\nwhich lock already depends on the new lock.\n\n");
+	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
 
 	print_circular_bug_entry(entry, depth);
 
@@ -1495,13 +1495,13 @@ print_bad_irq_dependency(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================================\n");
 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
 		irqclass, irqclass);
 	print_kernel_ident();
 	pr_warn("-----------------------------------------------------\n");
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
@@ -1509,46 +1509,46 @@ print_bad_irq_dependency(struct task_struct *curr,
 		curr->softirqs_enabled);
 	print_lock(next);
 
-	printk("\nand this task is already holding:\n");
+	pr_warn("\nand this task is already holding:\n");
 	print_lock(prev);
-	printk("which would create a new lock dependency:\n");
+	pr_warn("which would create a new lock dependency:\n");
 	print_lock_name(hlock_class(prev));
-	printk(KERN_CONT " ->");
+	pr_cont(" ->");
 	print_lock_name(hlock_class(next));
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 
-	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
+	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
 	print_lock_name(backwards_entry->class);
-	printk("\n... which became %s-irq-safe at:\n", irqclass);
+	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
 	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
-	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
-	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
-	printk("...");
+	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
+	pr_warn("...");
 
 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
-	printk("\nother info that might help us debug this:\n\n");
+	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
 				hlock_class(prev), hlock_class(next));
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
+	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
 	if (!save_trace(&prev_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe dependencies between the lock to be acquired");
-	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nthe dependencies between the lock to be acquired");
+	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
 	if (!save_trace(&next_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -1724,22 +1724,22 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================================\n");
 	pr_warn("WARNING: possible recursive locking detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(next);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(prev);
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2074,21 +2074,21 @@ static void print_collision(struct task_struct *curr,
 			struct held_lock *hlock_next,
 			struct lock_chain *chain)
 {
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================\n");
 	pr_warn("WARNING: chain_key collision\n");
 	print_kernel_ident();
 	pr_warn("----------------------------\n");
-	printk("%s/%d: ", current->comm, task_pid_nr(current));
-	printk("Hash chain already cached but the contents don't match!\n");
+	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
+	pr_warn("Hash chain already cached but the contents don't match!\n");
 
-	printk("Held locks:");
+	pr_warn("Held locks:");
 	print_chain_keys_held_locks(curr, hlock_next);
 
-	printk("Locks in cached chain:");
+	pr_warn("Locks in cached chain:");
 	print_chain_keys_chain(chain);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 #endif
@@ -2373,16 +2373,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("================================\n");
 	pr_warn("WARNING: inconsistent lock state\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------\n");
 
-	printk("inconsistent {%s} -> {%s} usage.\n",
+	pr_warn("inconsistent {%s} -> {%s} usage.\n",
 		usage_str[prev_bit], usage_str[new_bit]);
 
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
 		curr->comm, task_pid_nr(curr),
 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
@@ -2390,16 +2390,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		trace_softirqs_enabled(curr));
 	print_lock(this);
 
-	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
+	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_usage_bug_scenario(this);
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2438,28 +2438,28 @@ print_irq_inversion_bug(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("========================================================\n");
 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------------------\n");
-	printk("%s/%d just changed the state of lock:\n",
+	pr_warn("%s/%d just changed the state of lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
+		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
+		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other->class);
-	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
+	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 
 	/* Find a middle lock (if one exists) */
 	depth = get_lock_depth(other);
 	do {
 		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad path found in chain graph\n", __func__);
+			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
 			break;
 		}
 		middle = entry;
@@ -2475,12 +2475,12 @@ print_irq_inversion_bug(struct task_struct *curr,
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
 	if (!save_trace(&root->trace))
 		return 0;
 	print_shortest_lock_dependencies(other, root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3189,25 +3189,25 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("==================================\n");
 	pr_warn("WARNING: Nested lock was not taken\n");
 	print_kernel_ident();
 	pr_warn("----------------------------------\n");
 
-	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
+	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
 	print_lock(hlock);
 
-	printk("\nbut this task is not holding:\n");
-	printk("%s\n", hlock->nest_lock->name);
+	pr_warn("\nbut this task is not holding:\n");
+	pr_warn("%s\n", hlock->nest_lock->name);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3402,21 +3402,21 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================\n");
 	pr_warn("WARNING: bad unlock balance detected!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------------------\n");
-	printk("%s/%d is trying to release lock (",
+	pr_warn("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no more locks to release!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no more locks to release!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3974,21 +3974,21 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=================================\n");
 	pr_warn("WARNING: bad contention detected!\n");
 	print_kernel_ident();
 	pr_warn("---------------------------------\n");
-	printk("%s/%d is trying to contend lock (",
+	pr_warn("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no locks held!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no locks held!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -4318,17 +4318,17 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=========================\n");
 	pr_warn("WARNING: held lock freed!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------\n");
-	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
+	pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
 	print_lock(hlock);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4376,14 +4376,14 @@ static void print_held_locks_bug(void)
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("====================================\n");
 	pr_warn("WARNING: %s/%d still has locks held!\n",
 		current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	pr_warn("------------------------------------\n");
 	lockdep_print_held_locks(current);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4402,10 +4402,10 @@ void debug_show_all_locks(void)
 	int unlock = 1;
 
 	if (unlikely(!debug_locks)) {
-		printk("INFO: lockdep is turned off.\n");
+		pr_warn("INFO: lockdep is turned off.\n");
 		return;
 	}
-	printk("\nShowing all locks held in the system:\n");
+	pr_warn("\nShowing all locks held in the system:\n");
 
 	/*
 	 * Here we try to get the tasklist_lock as hard as possible,
@@ -4416,18 +4416,18 @@ void debug_show_all_locks(void)
 retry:
 	if (!read_trylock(&tasklist_lock)) {
 		if (count == 10)
-			printk("hm, tasklist_lock locked, retrying... ");
+			pr_warn("hm, tasklist_lock locked, retrying... ");
 		if (count) {
 			count--;
-			printk(" #%d", 10-count);
+			pr_cont(" #%d", 10-count);
 			mdelay(200);
 			goto retry;
 		}
-		printk(" ignoring it.\n");
+		pr_cont(" ignoring it.\n");
 		unlock = 0;
 	} else {
 		if (count != 10)
-			printk(KERN_CONT " locked it.\n");
+			pr_cont(" locked it.\n");
 	}
 
 	do_each_thread(g, p) {
@@ -4445,7 +4445,7 @@ void debug_show_all_locks(void)
 			unlock = 1;
 	} while_each_thread(g, p);
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================================\n\n");
 
 	if (unlock)
@@ -4475,12 +4475,12 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	if (unlikely(curr->lockdep_depth)) {
 		if (!debug_locks_off())
 			return;
-		printk("\n");
+		pr_warn("\n");
 		pr_warn("================================================\n");
 		pr_warn("WARNING: lock held when returning to user space!\n");
 		print_kernel_ident();
 		pr_warn("------------------------------------------------\n");
-		printk("%s/%d is leaving the kernel with locks still held!\n",
+		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
 			curr->comm, curr->pid);
 		lockdep_print_held_locks(curr);
 	}
@@ -4495,14 +4495,14 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	return;
 #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
 	/* Note: the following can be executed concurrently, so be careful. */
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("WARNING: suspicious RCU usage\n");
 	print_kernel_ident();
 	pr_warn("-----------------------------\n");
-	printk("%s:%d %s!\n", file, line, s);
-	printk("\nother info that might help us debug this:\n\n");
-	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+	pr_warn("%s:%d %s!\n", file, line, s);
+	pr_warn("\nother info that might help us debug this:\n\n");
+	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 		!rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
 			: !rcu_is_watching()
@@ -4529,10 +4529,10 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
 	if (!rcu_is_watching())
-		printk("RCU used illegally from extended quiescent state!\n");
+		pr_warn("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);