Merge tag 'x86_urgent_for_v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Add the respective UP last level cache mask accessors in order not to
   cause segfaults when lscpu accesses their representation in sysfs

 - Fix for a race in the alternatives batch patching machinery when
   kprobes are set

* tag 'x86_urgent_for_v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cacheinfo: Add a cpu_llc_shared_mask() UP variant
  x86/alternative: Fix race in try_get_desc()
commit 534b0abc62
Author: Linus Torvalds
Date:   2022-10-02 09:30:35 -07:00

2 changed files with 38 additions and 32 deletions

--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h

@@ -21,16 +21,6 @@ DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
 
-static inline struct cpumask *cpu_llc_shared_mask(int cpu)
-{
-	return per_cpu(cpu_llc_shared_map, cpu);
-}
-
-static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
-{
-	return per_cpu(cpu_l2c_shared_map, cpu);
-}
-
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
@@ -172,6 +162,16 @@ extern int safe_smp_processor_id(void);
 # define safe_smp_processor_id()	smp_processor_id()
 #endif
 
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+	return per_cpu(cpu_llc_shared_map, cpu);
+}
+
+static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
+{
+	return per_cpu(cpu_l2c_shared_map, cpu);
+}
+
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)	wbinvd()
 static inline int wbinvd_on_all_cpus(void)
@@ -179,6 +179,11 @@ static inline int wbinvd_on_all_cpus(void)
 	wbinvd();
 	return 0;
 }
+
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+	return (struct cpumask *)cpumask_of(0);
+}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus;
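The change above is easy to misread in diff form, so here is a minimal stand-alone model of the UP behaviour. It is not kernel code: the cpumask is reduced to a plain bitmap, and toy_cpu_llc_shared_mask() is a made-up name standing in for the new !CONFIG_SMP cpu_llc_shared_mask(). The point is that the accessor now yields a mask containing CPU 0 instead of nothing, which is roughly what cacheinfo's sysfs attributes end up handing to tools like lscpu.

#include <stdio.h>

/* Toy stand-in for struct cpumask on a single-CPU (!CONFIG_SMP) build. */
typedef unsigned long toy_cpumask;

static const toy_cpumask cpu0_only = 1UL << 0;	/* like cpumask_of(0) */

/* Models the new UP variant: CPU 0 shares its last level cache with itself. */
static const toy_cpumask *toy_cpu_llc_shared_mask(int cpu)
{
	(void)cpu;
	return &cpu0_only;
}

int main(void)
{
	/* Roughly what a shared_cpu_map-style attribute prints: "1", not an empty mask. */
	printf("llc shared_cpu_map: %lx\n", *toy_cpu_llc_shared_mask(0));
	return 0;
}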

--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c

@@ -1319,22 +1319,23 @@ struct bp_patching_desc {
 	atomic_t refs;
 };
 
-static struct bp_patching_desc *bp_desc;
+static struct bp_patching_desc bp_desc;
 
 static __always_inline
-struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
+struct bp_patching_desc *try_get_desc(void)
 {
-	/* rcu_dereference */
-	struct bp_patching_desc *desc = __READ_ONCE(*descp);
+	struct bp_patching_desc *desc = &bp_desc;
 
-	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
+	if (!arch_atomic_inc_not_zero(&desc->refs))
 		return NULL;
 
 	return desc;
 }
 
-static __always_inline void put_desc(struct bp_patching_desc *desc)
+static __always_inline void put_desc(void)
 {
+	struct bp_patching_desc *desc = &bp_desc;
+
 	smp_mb__before_atomic();
 	arch_atomic_dec(&desc->refs);
 }
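For readers unfamiliar with the idiom, the hunk above boils down to "take a reference only while the refcount is non-zero". Below is a hedged user-space C11 sketch of that pattern with invented names (try_get()/put() on a static object); it mirrors the roles of try_get_desc()/put_desc() but is an illustration, not the kernel implementation, which relies on arch_atomic_inc_not_zero() and noinstr constraints.

#include <stdatomic.h>
#include <stddef.h>

struct desc {
	atomic_int refs;	/* 0 means "not published or being torn down" */
	int payload;
};

static struct desc the_desc;

/* Increment refs only if it is currently non-zero (inc-not-zero). */
static struct desc *try_get(void)
{
	int old = atomic_load(&the_desc.refs);

	do {
		if (old == 0)
			return NULL;	/* nothing live to pin; caller backs off */
	} while (!atomic_compare_exchange_weak(&the_desc.refs, &old, old + 1));

	return &the_desc;
}

/* Drop the reference taken by try_get(). */
static void put(struct desc *d)
{
	atomic_fetch_sub(&d->refs, 1);
}

int main(void)
{
	atomic_store(&the_desc.refs, 1);	/* publish */

	struct desc *d = try_get();
	if (d)
		put(d);
	return 0;
}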
@@ -1367,15 +1368,15 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
 
 	/*
 	 * Having observed our INT3 instruction, we now must observe
-	 * bp_desc:
+	 * bp_desc with non-zero refcount:
 	 *
-	 *	bp_desc = desc			INT3
+	 *	bp_desc.refs = 1		INT3
 	 *	WMB				RMB
-	 *	write INT3			if (desc)
+	 *	write INT3			if (bp_desc.refs != 0)
 	 */
 	smp_rmb();
 
-	desc = try_get_desc(&bp_desc);
+	desc = try_get_desc();
 	if (!desc)
 		return 0;
 
@@ -1429,7 +1430,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
 	ret = 1;
 
 out_put:
-	put_desc(desc);
+	put_desc();
 	return ret;
 }
@@ -1460,18 +1461,20 @@ static int tp_vec_nr;
  */
 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
 {
-	struct bp_patching_desc desc = {
-		.vec = tp,
-		.nr_entries = nr_entries,
-		.refs = ATOMIC_INIT(1),
-	};
 	unsigned char int3 = INT3_INSN_OPCODE;
 	unsigned int i;
 	int do_sync;
 
 	lockdep_assert_held(&text_mutex);
 
-	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
+	bp_desc.vec = tp;
+	bp_desc.nr_entries = nr_entries;
+
+	/*
+	 * Corresponds to the implicit memory barrier in try_get_desc() to
+	 * ensure reading a non-zero refcount provides up to date bp_desc data.
+	 */
+	atomic_set_release(&bp_desc.refs, 1);
 
 	/*
 	 * Corresponding read barrier in int3 notifier for making sure the
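The new comment in this hunk is about ordering: the descriptor fields must be visible before the refcount becomes non-zero. As a rough C11 analogy (with invented names, not the kernel's atomic_set_release()/arch_atomic_inc_not_zero() pair), the writer publishes with a release store, and any reader that observes refs != 0 through an acquire operation is guaranteed to also see the fields written before it:

#include <stdatomic.h>
#include <stddef.h>

struct patch_desc {
	void *vec;
	unsigned int nr_entries;
	atomic_int refs;
};

static struct patch_desc pd;

/* Writer side: fill the fields first, then flip refs to 1 with release order. */
static void publish(void *vec, unsigned int nr)
{
	pd.vec = vec;
	pd.nr_entries = nr;
	atomic_store_explicit(&pd.refs, 1, memory_order_release);
}

/* Reader side: an acquire load that sees refs != 0 also sees vec/nr_entries. */
static unsigned int peek_nr_entries(void)
{
	if (atomic_load_explicit(&pd.refs, memory_order_acquire) == 0)
		return 0;
	return pd.nr_entries;
}

int main(void)
{
	static int dummy[4];

	publish(dummy, 4);
	return peek_nr_entries() == 4 ? 0 : 1;
}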
@@ -1559,12 +1562,10 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
 		text_poke_sync();
 
 	/*
-	 * Remove and synchronize_rcu(), except we have a very primitive
-	 * refcount based completion.
+	 * Remove and wait for refs to be zero.
 	 */
-	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
-	if (!atomic_dec_and_test(&desc.refs))
-		atomic_cond_read_acquire(&desc.refs, !VAL);
+	if (!atomic_dec_and_test(&bp_desc.refs))
+		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
 }
 
 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
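The teardown in this last hunk is what the old comment called a "very primitive refcount based completion": the writer drops the reference it set at publish time and, if any int3 handler still holds one, waits for the count to reach zero before the descriptor may be reused. A small C11 sketch of that shape follows; the busy-wait stands in for atomic_cond_read_acquire(), and the names are invented.

#include <stdatomic.h>

static atomic_int refs;

/*
 * Drop the writer's reference; if readers still hold theirs, wait until
 * the count hits zero so the shared descriptor can safely be reused.
 */
static void wait_for_readers(void)
{
	if (atomic_fetch_sub(&refs, 1) == 1)
		return;		/* we were the last holder */

	while (atomic_load_explicit(&refs, memory_order_acquire) != 0)
		;		/* spin; the kernel uses atomic_cond_read_acquire() here */
}

int main(void)
{
	atomic_store(&refs, 1);		/* the reference taken at publish time */
	wait_for_readers();		/* returns immediately: no readers */
	return 0;
}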