jump_label,module: Don't alloc static_key_mod for __ro_after_init keys

When a static_key is marked ro_after_init, its state will never change
(after init), therefore jump_label_update() will never need to iterate
the entries, and thus module load won't actually need to track this --
avoiding the static_key::next write.

Therefore, mark these keys such that jump_label_add_module() might
recognise them and avoid the modification.

Use the special state: 'static_key_linked(key) && !static_key_mod(key)'
to denote such keys.

jump_label_add_module() does not exist under CONFIG_JUMP_LABEL=n, so the
newly-introduced jump_label_init_ro() can be defined as a nop for that
configuration.

[ mingo: Renamed jump_label_ro() to jump_label_init_ro() ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Link: https://lore.kernel.org/r/20240313180106.2917308-2-vschneid@redhat.com
This commit is contained in:
Peter Zijlstra 2024-03-13 19:01:03 +01:00 committed by Ingo Molnar
parent 3774b28d8f
commit 91a1d97ef4
4 changed files with 62 additions and 0 deletions

View file

@ -180,6 +180,11 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata; addr < (unsigned long)__end_rodata;
} }
/*
 * is_kernel_ro_after_init - check whether @addr falls inside the kernel's
 * .data..ro_after_init section, as delimited by the linker-provided
 * __start_ro_after_init / __end_ro_after_init symbols.
 */
static inline bool is_kernel_ro_after_init(unsigned long addr)
{
	unsigned long start = (unsigned long)__start_ro_after_init;
	unsigned long end   = (unsigned long)__end_ro_after_init;

	return addr >= start && addr < end;
}
/** /**
* is_kernel_inittext - checks if the pointer address is located in the * is_kernel_inittext - checks if the pointer address is located in the
* .init.text section * .init.text section

View file

@ -216,6 +216,7 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[]; extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void); extern void jump_label_init(void);
extern void jump_label_init_ro(void);
extern void jump_label_lock(void); extern void jump_label_lock(void);
extern void jump_label_unlock(void); extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry, extern void arch_jump_label_transform(struct jump_entry *entry,
@ -265,6 +266,8 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true; static_key_initialized = true;
} }
static __always_inline void jump_label_init_ro(void) { }
static __always_inline bool static_key_false(struct static_key *key) static __always_inline bool static_key_false(struct static_key *key)
{ {
if (unlikely_notrace(static_key_count(key) > 0)) if (unlikely_notrace(static_key_count(key) > 0))

View file

@ -1408,6 +1408,7 @@ static void mark_readonly(void)
* insecure pages which are W+X. * insecure pages which are W+X.
*/ */
flush_module_init_free_work(); flush_module_init_free_work();
jump_label_init_ro();
mark_rodata_ro(); mark_rodata_ro();
debug_checkwx(); debug_checkwx();
rodata_test(); rodata_test();

View file

@ -530,6 +530,45 @@ void __init jump_label_init(void)
cpus_read_unlock(); cpus_read_unlock();
} }
/*
 * A key is "sealed" when it is in the special state
 * 'static_key_linked(key) && !static_key_mod(key)': the LINKED type bit is
 * set but no pointer bits remain above the type mask (i.e. no
 * static_key_mod / jump_entry pointer is stored).
 */
static inline bool static_key_sealed(struct static_key *key)
{
	unsigned long type = key->type;

	if (!(type & JUMP_TYPE_LINKED))
		return false;

	return !(type & ~JUMP_TYPE_MASK);
}
/*
 * Seal @key: keep only its TRUE (enabled) type bit and set LINKED, dropping
 * any entries/mod pointer bits, yielding the
 * 'static_key_linked(key) && !static_key_mod(key)' state tested by
 * static_key_sealed().
 */
static inline void static_key_seal(struct static_key *key)
{
	key->type = JUMP_TYPE_LINKED | (key->type & JUMP_TYPE_TRUE);
}
/*
 * jump_label_init_ro - seal all static keys that live in the
 * .data..ro_after_init section.
 *
 * Such keys can never change state after init, so module load need not
 * track them via static_key::next; sealing lets jump_label_add_module()
 * skip the static_key_mod allocation for them.
 *
 * Called from mark_readonly() before mark_rodata_ro(); requires
 * jump_label_init() to have run (static_key_initialized).
 */
void jump_label_init_ro(void)
{
	struct jump_entry *entry;

	if (WARN_ON_ONCE(!static_key_initialized))
		return;

	/* Same lock ordering as jump_label_init(): cpus first, then jl lock. */
	cpus_read_lock();
	jump_label_lock();

	for (entry = __start___jump_table; entry < __stop___jump_table; entry++) {
		struct static_key *key = jump_entry_key(entry);

		/* Seal each ro_after_init key exactly once. */
		if (is_kernel_ro_after_init((unsigned long)key) &&
		    !static_key_sealed(key))
			static_key_seal(key);
	}

	jump_label_unlock();
	cpus_read_unlock();
}
#ifdef CONFIG_MODULES #ifdef CONFIG_MODULES
enum jump_label_type jump_label_init_type(struct jump_entry *entry) enum jump_label_type jump_label_init_type(struct jump_entry *entry)
@ -650,6 +689,15 @@ static int jump_label_add_module(struct module *mod)
static_key_set_entries(key, iter); static_key_set_entries(key, iter);
continue; continue;
} }
/*
* If the key was sealed at init, then there's no need to keep a
* reference to its module entries - just patch them now and be
* done with it.
*/
if (static_key_sealed(key))
goto do_poke;
jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
if (!jlm) if (!jlm)
return -ENOMEM; return -ENOMEM;
@ -675,6 +723,7 @@ static int jump_label_add_module(struct module *mod)
static_key_set_linked(key); static_key_set_linked(key);
/* Only update if we've changed from our initial state */ /* Only update if we've changed from our initial state */
do_poke:
if (jump_label_type(iter) != jump_label_init_type(iter)) if (jump_label_type(iter) != jump_label_init_type(iter))
__jump_label_update(key, iter, iter_stop, true); __jump_label_update(key, iter, iter_stop, true);
} }
@ -699,6 +748,10 @@ static void jump_label_del_module(struct module *mod)
if (within_module((unsigned long)key, mod)) if (within_module((unsigned long)key, mod))
continue; continue;
/* No @jlm allocated because key was sealed at init. */
if (static_key_sealed(key))
continue;
/* No memory during module load */ /* No memory during module load */
if (WARN_ON(!static_key_linked(key))) if (WARN_ON(!static_key_linked(key)))
continue; continue;