Merge tag 'x86_urgent_for_v6.5_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - AMD's automatic IBRS doesn't enable cross-thread branch target
   injection protection (STIBP) for user processes. Enable STIBP on such
   systems.

 - Put the reference of AMD MCE error thresholding sysfs kobjects instead
   of deleting them when destroying them, so that the kernfs pointer is
   not deleted prematurely

 - Restore the unwind annotation in ret_from_fork_asm() so that kthread
   stack unwinding is no longer marked as unreliable, which was breaking
   livepatching

* tag 'x86_urgent_for_v6.5_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Enable STIBP on AMD if Automatic IBRS is enabled
  x86/MCE/AMD: Decrement threshold_bank refcount when removing threshold blocks
  x86: Fix kthread unwind
Linus Torvalds, 2023-07-30 11:05:35 -07:00, commit d410b62e45
4 changed files with 33 additions and 13 deletions

Documentation/admin-guide/hw-vuln/spectre.rst

@@ -484,11 +484,14 @@ Spectre variant 2
 
 Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
 boot, by setting the IBRS bit, and they're automatically protected against
-Spectre v2 variant attacks, including cross-thread branch target injections
-on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
+Spectre v2 variant attacks.
 
-Legacy IBRS systems clear the IBRS bit on exit to userspace and
-therefore explicitly enable STIBP for that
+On Intel's enhanced IBRS systems, this includes cross-thread branch target
+injections on SMT systems (STIBP). In other words, Intel eIBRS enables
+STIBP, too.
+
+AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
+the IBRS bit on exit to userspace, therefore both explicitly enable STIBP.
 
 The retpoline mitigation is turned on by default on vulnerable
 CPUs. It can be forced on or off by the administrator

arch/x86/entry/entry_64.S

@@ -285,7 +285,15 @@ SYM_FUNC_END(__switch_to_asm)
  */
 .pushsection .text, "ax"
 SYM_CODE_START(ret_from_fork_asm)
-	UNWIND_HINT_REGS
+	/*
+	 * This is the start of the kernel stack; even through there's a
+	 * register set at the top, the regset isn't necessarily coherent
+	 * (consider kthreads) and one cannot unwind further.
+	 *
+	 * This ensures stack unwinds of kernel threads terminate in a known
+	 * good state.
+	 */
+	UNWIND_HINT_END_OF_STACK
 	ANNOTATE_NOENDBR // copy_thread
 	CALL_DEPTH_ACCOUNT
@@ -295,6 +303,12 @@ SYM_CODE_START(ret_from_fork_asm)
 	movq	%r12, %rcx		/* fn_arg */
 	call	ret_from_fork
 
+	/*
+	 * Set the stack state to what is expected for the target function
+	 * -- at this point the register set should be a valid user set
+	 * and unwind should work normally.
+	 */
+	UNWIND_HINT_REGS
 	jmp	swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(ret_from_fork_asm)
.popsection
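
The hint matters because of livepatching: the livepatch core will only switch a task over to the patched code once that task's stack can be unwound reliably, so a kthread whose unwind is flagged unreliable stalls the transition. A minimal sketch of that dependency, assuming CONFIG_HAVE_RELIABLE_STACKTRACE; stack_trace_save_tsk_reliable() is the real kernel helper, while the wrapper around it is illustrative only and not part of this commit:

/*
 * Hedged sketch, not the livepatch implementation: a task is only safe to
 * transition when its stack can be walked reliably.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

static bool task_stack_is_reliable(struct task_struct *task)
{
	unsigned long entries[64];
	int nr;

	/*
	 * Returns the number of saved entries, or a negative error when the
	 * unwinder cannot walk the task's stack reliably -- which is what
	 * happened to kthreads before this fix.
	 */
	nr = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	return nr >= 0;
}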

arch/x86/kernel/cpu/bugs.c

@@ -1150,19 +1150,21 @@ spectre_v2_user_select_mitigation(void)
 	}
 
 	/*
-	 * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
+	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
 	 * is not required.
 	 *
-	 * Enhanced IBRS also protects against cross-thread branch target
+	 * Intel's Enhanced IBRS also protects against cross-thread branch target
 	 * injection in user-mode as the IBRS bit remains always set which
 	 * implicitly enables cross-thread protections. However, in legacy IBRS
 	 * mode, the IBRS bit is set only on kernel entry and cleared on return
-	 * to userspace. This disables the implicit cross-thread protection,
-	 * so allow for STIBP to be selected in that case.
+	 * to userspace. AMD Automatic IBRS also does not protect userspace.
+	 * These modes therefore disable the implicit cross-thread protection,
+	 * so allow for STIBP to be selected in those cases.
 	 */
 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 	    !smt_possible ||
-	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
 		return;
 
 	/*
@@ -2294,7 +2296,8 @@ static ssize_t mmio_stale_data_show_state(char *buf)
 
 static char *stibp_state(void)
 {
-	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
 		return "";
 
 	switch (spectre_v2_user_stibp) {
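
Read in the affirmative, the new check means STIBP is still selected when the CPU has STIBP, SMT is possible, and the system is either not in eIBRS mode at all or is using AMD Automatic IBRS. A stand-alone restatement of that policy; the function and parameter names below are illustrative, not kernel identifiers:

#include <stdbool.h>

/* Illustrative restatement of the condition above; not kernel code. */
static bool stibp_needed(bool has_stibp, bool smt_possible,
			 bool in_eibrs_mode, bool amd_autoibrs)
{
	/* Nothing to enable, or no sibling thread to protect against. */
	if (!has_stibp || !smt_possible)
		return false;

	/*
	 * Intel eIBRS keeps IBRS set in user mode and thereby already covers
	 * cross-thread injection; AMD Automatic IBRS does not, so it no
	 * longer disqualifies STIBP.
	 */
	if (in_eibrs_mode && !amd_autoibrs)
		return false;

	return true;
}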

arch/x86/kernel/cpu/mce/amd.c

@@ -1261,10 +1261,10 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
 	struct threshold_block *pos = NULL;
 	struct threshold_block *tmp = NULL;
 
-	kobject_del(b->kobj);
+	kobject_put(b->kobj);
 
 	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
-		kobject_del(&pos->kobj);
+		kobject_put(&pos->kobj);
 }
 
 static void threshold_remove_bank(struct threshold_bank *bank)
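
The rule this hunk leans on: kobject_del() only removes the object from sysfs and leaves its reference count untouched, while kobject_put() drops a reference and, on the final put, removes the sysfs entry and calls the ktype's ->release(). A minimal sketch of that lifetime pattern; the demo_* names are illustrative and not from the MCE code:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct demo_obj {
	struct kobject kobj;
};

static void demo_release(struct kobject *kobj)
{
	/* Called on the final kobject_put(); frees the embedding object. */
	kfree(container_of(kobj, struct demo_obj, kobj));
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
};

static struct demo_obj *demo_create(struct kobject *parent)
{
	struct demo_obj *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;

	if (kobject_init_and_add(&d->kobj, &demo_ktype, parent, "demo")) {
		kobject_put(&d->kobj);	/* drops the ref, triggers release */
		return NULL;
	}
	return d;
}

static void demo_destroy(struct demo_obj *d)
{
	/*
	 * Drop the initial reference: the last put removes the sysfs entry
	 * and ends in demo_release(). A bare kobject_del() here would remove
	 * the sysfs entry but leak the object and its reference.
	 */
	kobject_put(&d->kobj);
}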