mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 08:58:07 +00:00
2c5ce2dba2
eliminate custom code patching. For that, the alternatives infra is extended to accomodate paravirt's needs and, as a result, a lot of paravirt patching code goes away, leading to a sizeable cleanup and simplification. Work by Juergen Gross. -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmCGiXQACgkQEsHwGGHe VUocbw/+OkFzphK6zlNA8O3RJ24u2csXUWWUtpGlZ2220Nn/Bgyso2+fyg/NEeQg EmEttaY3JG/riCDfHk5Xm2saeVtsbPXN4f0sJm/Io/djF7Cm03WS0eS0aA2Rnuca MhmvvkrzYqZXAYVaxKkIH6sNlPgyXX7vDNPbTd/0ZCOb3ZKIyXwL+SaLatMCtE5o ou7e8Bj8xPSwcaCyK6sqjrT6jdpPjoTrxxrwENW8AlRu5lCU1pIY03GGhARPVoEm fWkZsIPn7DxhpyIqzJtEMX8EK1xN96E+NGkNuSAtJGP9HRb+3j5f4s3IUAfXiLXq r7NecFw8zHhPKl9J0pPCiW7JvMrCMU5xGwyeUmmhKyK2BxwvvAC173ohgMlCfB2Q FPIsQWemat17tSue8LIA8SmlSDQz6R+tTdUFT+vqmNV34PxOIEeSdV7HG8rs87Ec dYB9ENUgXqI+h2t7atE68CpTLpWXzNDcq2olEsaEUXenky2hvsi+VxNkWpmlKQ3I NOMU/AyH8oUzn5O0o3oxdPhDLmK5ItEFxjYjwrgLfKFQ+Y8vIMMq3LrKQGwOj+ZU n9qC7JjOwDKZGjd3YqNNRhnXp+w0IJvUHbyr3vIAcp8ohQwEKgpUvpZzf/BKUvHh nJgJSJ53GFJBbVOJMfgVq+JcFr+WO8MDKHaw6zWeCkivFZdSs4g= =h+km -----END PGP SIGNATURE----- Merge tag 'x86_alternatives_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull x86 alternatives/paravirt updates from Borislav Petkov: "First big cleanup to the paravirt infra to use alternatives and thus eliminate custom code patching. For that, the alternatives infrastructure is extended to accomodate paravirt's needs and, as a result, a lot of paravirt patching code goes away, leading to a sizeable cleanup and simplification. 
Work by Juergen Gross" * tag 'x86_alternatives_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/paravirt: Have only one paravirt patch function x86/paravirt: Switch functions with custom code to ALTERNATIVE x86/paravirt: Add new PVOP_ALT* macros to support pvops in ALTERNATIVEs x86/paravirt: Switch iret pvops to ALTERNATIVE x86/paravirt: Simplify paravirt macros x86/paravirt: Remove no longer needed 32-bit pvops cruft x86/paravirt: Add new features for paravirt patching x86/alternative: Use ALTERNATIVE_TERNARY() in _static_cpu_has() x86/alternative: Support ALTERNATIVE_TERNARY x86/alternative: Support not-feature x86/paravirt: Switch time pvops functions to use static_call() static_call: Add function to query current function static_call: Move struct static_call_key definition to static_call_types.h x86/alternative: Merge include files x86/alternative: Drop unused feature parameter from ALTINSTR_REPLACEMENT()
94 lines
2 KiB
C
94 lines
2 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Supervisor Mode Access Prevention support
|
|
*
|
|
* Copyright (C) 2012 Intel Corporation
|
|
* Author: H. Peter Anvin <hpa@linux.intel.com>
|
|
*/
|
|
|
|
#ifndef _ASM_X86_SMAP_H
|
|
#define _ASM_X86_SMAP_H
|
|
|
|
#include <asm/nops.h>
|
|
#include <asm/cpufeatures.h>
|
|
#include <asm/alternative.h>
|
|
|
|
/*
 * "Raw" instruction opcodes.
 *
 * CLAC and STAC are emitted as literal byte sequences (presumably so the
 * header assembles even with binutils that predate the mnemonics — TODO
 * confirm). 0f 01 ca encodes CLAC; 0f 01 cb encodes STAC.
 */
#define __ASM_CLAC	".byte 0x0f,0x01,0xca"	/* clac */
#define __ASM_STAC	".byte 0x0f,0x01,0xcb"	/* stac */
|
|
|
|
#ifdef __ASSEMBLY__
|
|
|
|
#ifdef CONFIG_X86_SMAP

/*
 * Assembly-language variants for use in .S files.
 *
 * Via the alternatives mechanism these start life as nothing and are
 * patched to the real CLAC/STAC instruction on CPUs that advertise
 * X86_FEATURE_SMAP.
 */
#define ASM_CLAC \
	ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP

#define ASM_STAC \
	ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP

#else /* CONFIG_X86_SMAP */

/* SMAP support compiled out: the macros expand to nothing. */
#define ASM_CLAC
#define ASM_STAC

#endif /* CONFIG_X86_SMAP */
|
|
|
|
#else /* __ASSEMBLY__ */
|
|
|
|
#ifdef CONFIG_X86_SMAP
|
|
|
|
/*
 * clac - execute CLAC (clear EFLAGS.AC), closing the kernel's window
 * for touching user-space memory when SMAP is active.
 *
 * Patched in via alternatives: a no-op on CPUs without
 * X86_FEATURE_SMAP, the real instruction otherwise.
 */
static __always_inline void clac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
}
|
|
|
|
/*
 * stac - execute STAC (set EFLAGS.AC), temporarily permitting kernel
 * access to user-space memory despite SMAP.
 *
 * Patched in via alternatives: a no-op on CPUs without
 * X86_FEATURE_SMAP, the real instruction otherwise.
 */
static __always_inline void stac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_STAC, X86_FEATURE_SMAP);
}
|
|
|
|
/*
 * smap_save - snapshot EFLAGS (including the AC bit) and then execute
 * CLAC, so SMAP protection is re-armed regardless of prior state.
 *
 * Returns the saved flags word for a later smap_restore().
 *
 * NOTE: on CPUs without X86_FEATURE_SMAP the entire pushf/pop/clac
 * sequence is patched out, so @flags is returned uninitialized; that is
 * harmless because the matching smap_restore() is patched out too, and
 * the value must not be used for anything else.
 */
static __always_inline unsigned long smap_save(void)
{
	unsigned long flags;

	asm volatile ("# smap_save\n\t"
		      ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC "\n\t",
				  X86_FEATURE_SMAP)
		      : "=rm" (flags) : : "memory", "cc");

	return flags;
}
|
|
|
|
/*
 * smap_restore - write @flags (as returned by smap_save()) back into
 * EFLAGS via push/popf, restoring the AC bit to its saved state.
 *
 * Like smap_save(), the whole sequence is patched to a no-op on CPUs
 * without X86_FEATURE_SMAP.
 */
static __always_inline void smap_restore(unsigned long flags)
{
	asm volatile ("# smap_restore\n\t"
		      ALTERNATIVE("", "push %0; popf\n\t",
				  X86_FEATURE_SMAP)
		      : : "g" (flags) : "memory", "cc");
}
|
|
|
|
/*
 * These macros can be used in asm() statements from C code.  Same
 * semantics as the assembly-side versions: empty until alternatives
 * patching inserts CLAC/STAC on X86_FEATURE_SMAP hardware.
 */
#define ASM_CLAC \
	ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
#define ASM_STAC \
	ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
|
|
|
|
#else /* CONFIG_X86_SMAP */
|
|
|
|
/*
 * CONFIG_X86_SMAP is disabled: every SMAP control operation collapses
 * to a no-op so callers need no #ifdefs of their own.
 */
static inline void clac(void)
{
}

static inline void stac(void)
{
}

/* No state to save; return a fixed token for smap_restore() to ignore. */
static inline unsigned long smap_save(void)
{
	return 0;
}

static inline void smap_restore(unsigned long flags)
{
}
|
|
|
|
/* SMAP compiled out: the asm() helper macros expand to nothing. */
#define ASM_CLAC
#define ASM_STAC
|
|
|
|
#endif /* CONFIG_X86_SMAP */
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif /* _ASM_X86_SMAP_H */
|