Changes in this cycle were:

  - bitops & cpumask:
     - Always inline various generic helpers, to improve code generation,
       but also for instrumentation, found by noinstr validation.
     - Add an x86-specific cpumask_clear_cpu() helper to improve code
       generation.
 
  - atomics:
     - Fix atomic64_{read_acquire,set_release} fallbacks
 
  - lockdep:
     - Fix /proc/lockdep output loop iteration for classes
     - Fix /proc/lockdep potential access to invalid memory
     - Minor cleanups
     - Add Mark Rutland as reviewer for atomic primitives
 
  - jump labels:
     - Clean up the code a bit
 
  - misc:
     - Add __sched annotations to percpu rwsem primitives
     - Enable RT_MUTEXES on PREEMPT_RT by default
     - Stray v8086_mode() inlining fix, result of noinstr objtool validation
 
 Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmI4XQgRHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1imLg//SusL4SW7xWprktpltACjjOk2UDB6x26A
 GfG3vOxjdqZ1qCrVQqNHialOTj3Wci2HxAarKui9of9o7ueEQNGsyvMQte8xJUhw
 osWDFbTlzr2WmkH8I5FPtPq30P7ulcOa6eZNO/1M2IIvXYQkGYgTosXRPmD/fIKA
 qJgw2V7B8QME9rHT/0kLSlhTzHjvu0y1dK9rTr5oVocZER1e/cXVFkSUz/uGL/XH
 /mpWzD/dwGXvrbgGbewvzZ0L7jO/EH3/ZAUDgsksebRSqa3+Ln3Gm8mMA5Hx1Vpm
 a4CMi7hrCJ1ZWSnleDRtxDAgHG20BDKFMLxsTPAySoy4dQ+KT2KieAlo7U3L1ABJ
 G7xQfS/OUd/mRptXUQYTfv5wfGt/xqZAyV31RTQJElKetWBcL1du4uc4g4fITgVN
 8zpIOBK7AyeiSLCG4LLN3ROa5oYPoCawsUkokeaewiasacvDKquDEj/ZtUH7eNCm
 1AGM2RCJim2YpWyGzX3jrCMK9/ERZjw0MJUDUXpUIUE1NBuoWhkWpuYbu+P0JQ+D
 0Z3Hxo/4JYnF1nEH7a87q0QBr7QnHFW8fUgxuR5o5c5ks+kc4ym3tUT6Wi9mzDug
 PfFbTiP1AAWv65fvCVjZP/P+tL8019hRGhCWH9tkXNTxwSJJi2Ca7CGKH+4UI7bR
 uAkFrWht4K0=
 =04kk
 -----END PGP SIGNATURE-----

Merge tag 'locking-core-2022-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Changes in this cycle were:

  Bitops & cpumask:
   - Always inline various generic helpers, to improve code generation,
     but also for instrumentation, found by noinstr validation.

   - Add an x86-specific cpumask_clear_cpu() helper to improve code
     generation

  Atomics:
   - Fix atomic64_{read_acquire,set_release} fallbacks

  Lockdep:
   - Fix /proc/lockdep output loop iteration for classes

   - Fix /proc/lockdep potential access to invalid memory

   - Add Mark Rutland as reviewer for atomic primitives

   - Minor cleanups

  Jump labels:
   - Clean up the code a bit

  Misc:
   - Add __sched annotations to percpu rwsem primitives

   - Enable RT_MUTEXES on PREEMPT_RT by default

   - Stray v8086_mode() inlining fix, result of noinstr objtool
     validation"

* tag 'locking-core-2022-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  jump_label: Refactor #ifdef of struct static_key
  jump_label: Avoid unneeded casts in STATIC_KEY_INIT_{TRUE,FALSE}
  locking/lockdep: Iterate lock_classes directly when reading lockdep files
  x86/ptrace: Always inline v8086_mode() for instrumentation
  cpumask: Add a x86-specific cpumask_clear_cpu() helper
  locking: Enable RT_MUTEXES by default on PREEMPT_RT.
  locking/local_lock: Make the empty local_lock_*() function a macro.
  atomics: Fix atomic64_{read_acquire,set_release} fallbacks
  locking: Add missing __sched attributes
  cpumask: Always inline helpers which use bit manipulation functions
  asm-generic/bitops: Always inline all bit manipulation helpers
  locking/lockdep: Avoid potential access of invalid memory in lock_class
  lockdep: Use memset_startat() helper in reinit_class()
  MAINTAINERS: add myself as reviewer for atomics
Linus Torvalds 2022-03-22 13:44:21 -07:00
commit ebd326ce72
17 changed files with 168 additions and 74 deletions

View File

@@ -3205,6 +3205,7 @@ ATOMIC INFRASTRUCTURE
 M:	Will Deacon <will@kernel.org>
 M:	Peter Zijlstra <peterz@infradead.org>
 R:	Boqun Feng <boqun.feng@gmail.com>
+R:	Mark Rutland <mark.rutland@arm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	arch/*/include/asm/atomic*.h

View File

@@ -20,11 +20,21 @@ static __always_inline bool arch_cpu_online(int cpu)
 {
 	return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
 }
+
+static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+	arch_clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
 #else
 static __always_inline bool arch_cpu_online(int cpu)
 {
 	return cpu == 0;
 }
+
+static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+	return;
+}
 #endif
 
 #define arch_cpu_is_offline(cpu)	unlikely(!arch_cpu_online(cpu))
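
For context: arch_cpumask_clear_cpu() above expands straight to
arch_clear_bit(), i.e. a plain LOCK BTR on x86, bypassing the KASAN/KCSAN
instrumented clear_bit() path. A minimal sketch of the kind of caller this
helps (the function name here is invented for illustration, not part of the
patch):

	/* Hypothetical caller: clear this CPU from a mask in code that
	 * must not call instrumented helpers out of line. */
	static void example_leave_mask(struct cpumask *mask)
	{
		arch_cpumask_clear_cpu(smp_processor_id(), mask);
	}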

View File

@@ -137,7 +137,7 @@ static __always_inline int user_mode(struct pt_regs *regs)
 #endif
 }
 
-static inline int v8086_mode(struct pt_regs *regs)
+static __always_inline int v8086_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
 	return (regs->flags & X86_VM_MASK);
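
The __always_inline here is about noinstr correctness, not just code size: if
the compiler emits v8086_mode() out of line, a noinstr caller ends up calling
an instrumentable function, which objtool's noinstr validation warns about. A
simplified sketch of the constraint (illustrative only, not the exact call
site; the callee name is hypothetical):

	/* Anything reachable from noinstr code must itself be noinstr
	 * or be fully inlined into its caller. */
	noinstr void example_entry_check(struct pt_regs *regs)
	{
		if (v8086_mode(regs))		/* OK only because it always inlines */
			handle_vm86_entry(regs);	/* hypothetical helper */
	}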

View File

@@ -23,7 +23,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void set_bit(long nr, volatile unsigned long *addr)
 {
 	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_set_bit(nr, addr);
@@ -36,7 +36,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
  *
  * This is a relaxed atomic operation (no implied memory barriers).
  */
-static inline void clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
 {
 	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_clear_bit(nr, addr);
@@ -52,7 +52,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 {
 	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_change_bit(nr, addr);
@@ -65,7 +65,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  *
  * This is an atomic fully-ordered operation (implied full memory barrier).
 */
-static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
@@ -79,7 +79,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
  *
  * This is an atomic fully-ordered operation (implied full memory barrier).
 */
-static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
  *
  * This is an atomic fully-ordered operation (implied full memory barrier).
 */
-static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
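
A note on why forced inlining matters for these wrappers: __always_inline
(include/linux/compiler_types.h) overrides the compiler's inlining heuristics,
whereas plain inline is only a hint. An out-of-line copy of set_bit() would be
an instrumented function that noinstr code could end up calling, which is
exactly what the noinstr validation flagged. For reference:

	/* From include/linux/compiler_types.h: */
	#define __always_inline inline __attribute__((__always_inline__))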

View File

@@ -22,7 +22,7 @@
  * region of memory concurrently, the effect may be that only one operation
  * succeeds.
  */
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___set_bit(nr, addr);
@@ -37,7 +37,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
  * region of memory concurrently, the effect may be that only one operation
  * succeeds.
  */
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit(nr, addr);
@@ -52,13 +52,13 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
  * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___change_bit(nr, addr);
 }
 
-static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
 {
 	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
 		/*
@@ -90,7 +90,7 @@ static inline void __instrument_read_write_bitop(long nr, volatile unsigned long
  * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_set_bit(nr, addr);
@@ -104,7 +104,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
  * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_clear_bit(nr, addr);
@@ -118,7 +118,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
  * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_change_bit(nr, addr);
@@ -129,7 +129,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
 */
-static inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool test_bit(long nr, const volatile unsigned long *addr)
 {
 	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_bit(nr, addr);

View File

@@ -151,7 +151,16 @@
 static __always_inline int
 arch_atomic_read_acquire(const atomic_t *v)
 {
-	return smp_load_acquire(&(v)->counter);
+	int ret;
+
+	if (__native_word(atomic_t)) {
+		ret = smp_load_acquire(&(v)->counter);
+	} else {
+		ret = arch_atomic_read(v);
+		__atomic_acquire_fence();
+	}
+
+	return ret;
 }
 #define arch_atomic_read_acquire arch_atomic_read_acquire
 #endif
@@ -160,7 +169,12 @@ arch_atomic_read_acquire(const atomic_t *v)
 static __always_inline void
 arch_atomic_set_release(atomic_t *v, int i)
 {
-	smp_store_release(&(v)->counter, i);
+	if (__native_word(atomic_t)) {
+		smp_store_release(&(v)->counter, i);
+	} else {
+		__atomic_release_fence();
+		arch_atomic_set(v, i);
+	}
 }
 #define arch_atomic_set_release arch_atomic_set_release
 #endif
@@ -1258,7 +1272,16 @@ arch_atomic_dec_if_positive(atomic_t *v)
 static __always_inline s64
 arch_atomic64_read_acquire(const atomic64_t *v)
 {
-	return smp_load_acquire(&(v)->counter);
+	s64 ret;
+
+	if (__native_word(atomic64_t)) {
+		ret = smp_load_acquire(&(v)->counter);
+	} else {
+		ret = arch_atomic64_read(v);
+		__atomic_acquire_fence();
+	}
+
+	return ret;
 }
 #define arch_atomic64_read_acquire arch_atomic64_read_acquire
 #endif
@@ -1267,7 +1290,12 @@ arch_atomic64_read_acquire(const atomic64_t *v)
 static __always_inline void
 arch_atomic64_set_release(atomic64_t *v, s64 i)
 {
-	smp_store_release(&(v)->counter, i);
+	if (__native_word(atomic64_t)) {
+		smp_store_release(&(v)->counter, i);
+	} else {
+		__atomic_release_fence();
+		arch_atomic64_set(v, i);
+	}
 }
 #define arch_atomic64_set_release arch_atomic64_set_release
 #endif
@@ -2358,4 +2386,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
 #endif
 
 #endif /* _LINUX_ATOMIC_FALLBACK_H */
-// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
+// 8e2cc06bc0d2c0967d2f8424762bd48555ee40ae
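
The fallbacks branch on __native_word() because smp_load_acquire() and
smp_store_release() compile-time assert that they operate on a
machine-word-sized object; on 32-bit architectures atomic64_t is 8 bytes, so
the generic fallback must instead use a plain arch_atomic64_read()/set() plus
an explicit acquire/release fence. For reference, the macro as defined in
include/linux/compiler_types.h at this point in time:

	#define __native_word(t) \
		(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
		 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))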

View File

@@ -102,7 +102,7 @@ extern atomic_t __num_online_cpus;
 
 extern cpumask_t cpus_booted_once_mask;
 
-static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
+static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
 {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 	WARN_ON_ONCE(cpu >= bits);
@@ -110,7 +110,7 @@ static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
 }
 
 /* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static __always_inline unsigned int cpumask_check(unsigned int cpu)
 {
 	cpu_max_bits_warn(cpu, nr_cpumask_bits);
 	return cpu;
@@ -341,12 +341,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
  * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
-static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
 {
 	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
-static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
 {
 	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
@@ -357,12 +357,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
  * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
-static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
 {
 	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
-static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
 {
 	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
@@ -374,7 +374,7 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
  *
 * Returns 1 if @cpu is set in @cpumask, else returns 0
 */
-static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
 {
 	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
 }
@@ -388,7 +388,7 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
  *
 * test_and_set_bit wrapper for cpumasks.
 */
-static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
 {
 	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
 }
@@ -402,7 +402,7 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
  *
 * test_and_clear_bit wrapper for cpumasks.
 */
-static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
 {
 	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
 }

View File

@@ -82,10 +82,9 @@ extern bool static_key_initialized;
 				    "%s(): static key '%pS' used before call to jump_label_init()", \
 				    __func__, (key))
 
-#ifdef CONFIG_JUMP_LABEL
-
 struct static_key {
 	atomic_t enabled;
+#ifdef CONFIG_JUMP_LABEL
 /*
  * Note:
  * To make anonymous unions work with old compilers, the static
@@ -104,13 +103,9 @@ struct static_key {
 		struct jump_entry *entries;
 		struct static_key_mod *next;
 	};
+#endif	/* CONFIG_JUMP_LABEL */
 };
-
-#else
-struct static_key {
-	atomic_t enabled;
-};
-#endif	/* CONFIG_JUMP_LABEL */
 
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_JUMP_LABEL
@@ -251,10 +246,10 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
 */
 #define STATIC_KEY_INIT_TRUE					\
 	{ .enabled = { 1 },					\
-	  { .entries = (void *)JUMP_TYPE_TRUE } }
+	  { .type = JUMP_TYPE_TRUE } }
 #define STATIC_KEY_INIT_FALSE					\
 	{ .enabled = { 0 },					\
-	  { .entries = (void *)JUMP_TYPE_FALSE } }
+	  { .type = JUMP_TYPE_FALSE } }
 
 #else	/* !CONFIG_JUMP_LABEL */
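
Reassembled from the hunks above, both configurations now share a single
struct definition, with only the union guarded:

	struct static_key {
		atomic_t enabled;
	#ifdef CONFIG_JUMP_LABEL
		/* bits 0 and 1 of the pointer encode the initial state and
		 * whether it points at a static_key_mod */
		union {
			unsigned long type;
			struct jump_entry *entries;
			struct static_key_mod *next;
		};
	#endif	/* CONFIG_JUMP_LABEL */
	};

With .type an explicit union member, STATIC_KEY_INIT_TRUE/FALSE no longer
need the (void *) casts, which is the second jump_label cleanup in this pull.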

View File

@@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l)
 }
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
-static inline void local_lock_acquire(local_lock_t *l) { }
-static inline void local_lock_release(local_lock_t *l) { }
-static inline void local_lock_debug_init(local_lock_t *l) { }
+# define local_lock_acquire(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
+# define local_lock_release(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
+# define local_lock_debug_init(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
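
typecheck() (include/linux/typecheck.h) is what keeps the macro versions as
type-safe as the inline functions they replace: it warns at compile time for
a wrong pointer type, yet evaluates to 1 and generates no code. For
reference:

	#define typecheck(type,x) \
	({	type __dummy; \
		typeof(x) __dummy2; \
		(void)(&__dummy == &__dummy2); \
		1; \
	})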

View File

@@ -2054,6 +2054,7 @@ source "arch/Kconfig"
 
 config RT_MUTEXES
 	bool
+	default y if PREEMPT_RT
 
 config BASE_SMALL
 	int

View File

@@ -183,11 +183,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
 static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
 unsigned long nr_lock_classes;
 unsigned long nr_zapped_classes;
-#ifndef CONFIG_DEBUG_LOCKDEP
-static
-#endif
+unsigned long max_lock_class_idx;
 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
-static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
+DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
 
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
@@ -338,7 +336,7 @@ static inline void lock_release_holdtime(struct held_lock *hlock)
  * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
-LIST_HEAD(all_lock_classes);
+static LIST_HEAD(all_lock_classes);
 static LIST_HEAD(free_lock_classes);
 
 /**
@@ -1252,6 +1250,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
+	int idx;
 
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
@@ -1317,6 +1316,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * of classes.
 	 */
 	list_move_tail(&class->lock_entry, &all_lock_classes);
+	idx = class - lock_classes;
+	if (idx > max_lock_class_idx)
+		max_lock_class_idx = idx;
 
 	if (verbose(class)) {
 		graph_unlock();
@@ -6000,6 +6002,8 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 		WRITE_ONCE(class->name, NULL);
 		nr_lock_classes--;
 		__clear_bit(class - lock_classes, lock_classes_in_use);
+		if (class - lock_classes == max_lock_class_idx)
+			max_lock_class_idx--;
 	} else {
 		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
 			  class->name);
@@ -6011,13 +6015,10 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 
 static void reinit_class(struct lock_class *class)
 {
-	void *const p = class;
-	const unsigned int offset = offsetof(struct lock_class, key);
-
 	WARN_ON_ONCE(!class->lock_entry.next);
 	WARN_ON_ONCE(!list_empty(&class->locks_after));
 	WARN_ON_ONCE(!list_empty(&class->locks_before));
-	memset(p + offset, 0, sizeof(*class) - offset);
+	memset_startat(class, 0, key);
 	WARN_ON_ONCE(!class->lock_entry.next);
 	WARN_ON_ONCE(!list_empty(&class->locks_after));
 	WARN_ON_ONCE(!list_empty(&class->locks_before));
@@ -6290,7 +6291,13 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	lockdep_reset_lock_reg(lock);
 }
 
-/* Unregister a dynamically allocated key. */
+/*
+ * Unregister a dynamically allocated key.
+ *
+ * Unlike lockdep_register_key(), a search is always done to find a matching
+ * key irrespective of debug_locks to avoid potential invalid access to freed
+ * memory in lock_class entry.
+ */
 void lockdep_unregister_key(struct lock_class_key *key)
 {
 	struct hlist_head *hash_head = keyhashentry(key);
@@ -6305,10 +6312,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	lockdep_lock();
 
-	pf = get_pending_free();
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
 		if (k == key) {
 			hlist_del_rcu(&k->hash_entry);
@@ -6316,11 +6321,13 @@ void lockdep_unregister_key(struct lock_class_key *key)
 			break;
 		}
 	}
-	WARN_ON_ONCE(!found);
-	__lockdep_free_key_range(pf, key, 1);
-	call_rcu_zapped(pf);
-	graph_unlock();
-out_irq:
+	WARN_ON_ONCE(!found && debug_locks);
+	if (found) {
+		pf = get_pending_free();
+		__lockdep_free_key_range(pf, key, 1);
+		call_rcu_zapped(pf);
+	}
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
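
The memset_startat() used in reinit_class() above zeroes everything from a
named member through the end of the object, replacing the open-coded
offsetof() arithmetic. Its shape (include/linux/string.h, as of this series):

	#define memset_startat(obj, v, member)				\
	({								\
		u8 *__ptr = (u8 *)(obj);				\
		typeof(v) __val = (v);					\
		memset(__ptr + offsetof(typeof(*(obj)), member), __val,	\
		       sizeof(*(obj)) - offsetof(typeof(*(obj)), member)); \
	})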

View File

@@ -121,7 +121,6 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
 
-extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
 #define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
@@ -151,6 +150,10 @@ extern unsigned int nr_large_chain_blocks;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_bfs_queue_depth;
+extern unsigned long max_lock_class_idx;
+
+extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+extern unsigned long lock_classes_in_use[];
 
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
@@ -205,7 +208,6 @@ struct lockdep_stats {
 };
 
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
-extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 #define __debug_atomic_inc(ptr)					\
 	this_cpu_inc(lockdep_stats.ptr);

View File

@@ -24,14 +24,33 @@
 
 #include "lockdep_internals.h"
 
+/*
+ * Since iteration of lock_classes is done without holding the lockdep lock,
+ * it is not safe to iterate all_lock_classes list directly as the iteration
+ * may branch off to free_lock_classes or the zapped list. Iteration is done
+ * directly on the lock_classes array by checking the lock_classes_in_use
+ * bitmap and max_lock_class_idx.
+ */
+#define iterate_lock_classes(idx, class)				\
+	for (idx = 0, class = lock_classes; idx <= max_lock_class_idx;	\
+	     idx++, class++)
+
 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	return seq_list_next(v, &all_lock_classes, pos);
+	struct lock_class *class = v;
+
+	++class;
+	*pos = class - lock_classes;
+	return (*pos > max_lock_class_idx) ? NULL : class;
 }
 
 static void *l_start(struct seq_file *m, loff_t *pos)
 {
-	return seq_list_start_head(&all_lock_classes, *pos);
+	unsigned long idx = *pos;
+
+	if (idx > max_lock_class_idx)
+		return NULL;
+	return lock_classes + idx;
 }
 
 static void l_stop(struct seq_file *m, void *v)
@@ -57,14 +76,16 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 
 static int l_show(struct seq_file *m, void *v)
 {
-	struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
+	struct lock_class *class = v;
 	struct lock_list *entry;
 	char usage[LOCK_USAGE_CHARS];
+	int idx = class - lock_classes;
 
-	if (v == &all_lock_classes) {
+	if (v == lock_classes)
 		seq_printf(m, "all lock classes:\n");
+
+	if (!test_bit(idx, lock_classes_in_use))
 		return 0;
-	}
 
 	seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -220,8 +241,11 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 
 #ifdef CONFIG_PROVE_LOCKING
 	struct lock_class *class;
+	unsigned long idx;
 
-	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+	iterate_lock_classes(idx, class) {
+		if (!test_bit(idx, lock_classes_in_use))
+			continue;
 
 		if (class->usage_mask == 0)
 			nr_unused++;
@@ -254,6 +278,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 
 		sum_forward_deps += lockdep_count_forward_deps(class);
 	}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
 #endif
@@ -345,6 +370,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, " max bfs queue depth:           %11u\n",
 			max_bfs_queue_depth);
 #endif
+	seq_printf(m, " max lock class index:          %11lu\n",
+			max_lock_class_idx);
 	lockdep_stats_debug_show(m);
 	seq_printf(m, " debug_locks:                   %11u\n",
 			debug_locks);
@@ -622,12 +649,16 @@ static int lock_stat_open(struct inode *inode, struct file *file)
 	if (!res) {
 		struct lock_stat_data *iter = data->stats;
 		struct seq_file *m = file->private_data;
+		unsigned long idx;
 
-		list_for_each_entry(class, &all_lock_classes, lock_entry) {
+		iterate_lock_classes(idx, class) {
+			if (!test_bit(idx, lock_classes_in_use))
+				continue;
 			iter->class = class;
 			iter->stats = lock_stats(class);
 			iter++;
 		}
+
 		data->iter_end = iter;
 
 		sort(data->stats, data->iter_end - data->stats,
@@ -645,6 +676,7 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
 			       size_t count, loff_t *ppos)
 {
 	struct lock_class *class;
+	unsigned long idx;
 	char c;
 
 	if (count) {
@@ -654,8 +686,11 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
 		if (c != '0')
 			return count;
 
-		list_for_each_entry(class, &all_lock_classes, lock_entry)
+		iterate_lock_classes(idx, class) {
+			if (!test_bit(idx, lock_classes_in_use))
+				continue;
 			clear_lock_stats(class);
+		}
 	}
 	return count;
 }
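
The new iteration scheme is self-contained enough to demonstrate outside the
kernel. A minimal user-space analog (all names illustrative; the kernel uses
test_bit() on the lock_classes_in_use bitmap rather than this toy mask):

	#include <stdio.h>

	#define MAX_KEYS 64

	struct lock_class { const char *name; };

	static struct lock_class lock_classes[MAX_KEYS];
	static unsigned long in_use;		/* toy lock_classes_in_use */
	static unsigned long max_lock_class_idx;

	#define iterate_lock_classes(idx, class) \
		for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \
		     idx++, class++)

	int main(void)
	{
		struct lock_class *class;
		unsigned long idx;

		lock_classes[0].name = "lock_a"; in_use |= 1UL << 0;
		lock_classes[3].name = "lock_b"; in_use |= 1UL << 3;
		max_lock_class_idx = 3;

		/* zapped/unused slots (1 and 2) are skipped via the bitmap */
		iterate_lock_classes(idx, class) {
			if (!(in_use & (1UL << idx)))
				continue;
			printf("%lu: %s\n", idx, class->name);
		}
		return 0;
	}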

View File

@@ -7,6 +7,7 @@
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
+#include <linux/sched/debug.h>
 #include <linux/errno.h>
 
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
@@ -162,7 +163,7 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
 	__set_current_state(TASK_RUNNING);
 }
 
-bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
+bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
 {
 	if (__percpu_down_read_trylock(sem))
 		return true;
@@ -211,7 +212,7 @@ static bool readers_active_check(struct percpu_rw_semaphore *sem)
 	return true;
 }
 
-void percpu_down_write(struct percpu_rw_semaphore *sem)
+void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
 {
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
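
__sched, made available here by the new <linux/sched/debug.h> include, places
a function in the .sched.text section; the wchan and stack-trace code skips
that section, so a task sleeping in percpu_down_write() is reported as blocked
at its caller rather than inside the locking internals. For reference:

	/* include/linux/sched/debug.h: ignored in wchan output */
	#define __sched		__section(".sched.text")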

View File

@@ -1048,7 +1048,7 @@ out_nolock:
 /*
  * Wait until we successfully acquire the write lock
 */
-static struct rw_semaphore *
+static struct rw_semaphore __sched *
 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 {
 	long count;

View File

@@ -2,6 +2,15 @@ cat <<EOF
 static __always_inline ${ret}
 arch_${atomic}_read_acquire(const ${atomic}_t *v)
 {
-	return smp_load_acquire(&(v)->counter);
+	${int} ret;
+
+	if (__native_word(${atomic}_t)) {
+		ret = smp_load_acquire(&(v)->counter);
+	} else {
+		ret = arch_${atomic}_read(v);
+		__atomic_acquire_fence();
+	}
+
+	return ret;
 }
 EOF

View File

@@ -2,6 +2,11 @@ cat <<EOF
 static __always_inline void
 arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
-	smp_store_release(&(v)->counter, i);
+	if (__native_word(${atomic}_t)) {
+		smp_store_release(&(v)->counter, i);
+	} else {
+		__atomic_release_fence();
+		arch_${atomic}_set(v, i);
+	}
 }
 EOF