commit 1fc654cf6e

We currently have 6 (!) separate naming variants to name temporary
instruction buffers that are used for code patching:

 - insnbuf
 - insnbuff
 - insn_buff
 - insn_buffer
 - ibuf
 - ibuffer

These are used as local variables, percpu fields and function parameters.

Standardize all the names to a single variant: 'insn_buff'.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
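For illustration, the standardization applied to one signature from the file
below; the "before" spelling (insnbuf) is just one of the variants listed
above, not necessarily the one this particular file used:

	/* before: one of the six naming variants */
	unsigned int native_patch(u8 type, void *insnbuf, unsigned long addr,
				  unsigned int len);

	/* after: the single standardized name */
	unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
				  unsigned int len);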
126 lines · 3.6 KiB · C
// SPDX-License-Identifier: GPL-2.0
#include <linux/stringify.h>

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>

#define PSTART(d, m)						\
	patch_data_##d.m

#define PEND(d, m)						\
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len)				\
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len)		\
	case PARAVIRT_PATCH(ops.m):				\
		return PATCH(data, ops##_##m, insn_buff, len)
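
/*
 * For reference: PATCH_CASE(irq, save_fl, xxl, insn_buff, len) expands,
 * via PATCH(), PSTART() and PEND(), to roughly:
 *
 *	case PARAVIRT_PATCH(irq.save_fl):
 *		return paravirt_patch_insns(insn_buff, len,
 *					    patch_data_xxl.irq_save_fl,
 *					    patch_data_xxl.irq_save_fl +
 *					    sizeof(patch_data_xxl.irq_save_fl));
 *
 * i.e. it copies the native instruction bytes for that op into insn_buff.
 */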

#ifdef CONFIG_PARAVIRT_XXL
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	irq_restore_fl[2];
# ifdef CONFIG_X86_64
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	cpu_usergs_sysret64[6];
	const unsigned char	cpu_swapgs[3];
	const unsigned char	mov64[3];
# else
	const unsigned char	cpu_iret[1];
# endif
};

static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable	= { 0xfa },		// cli
	.irq_irq_enable		= { 0xfb },		// sti
	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
# ifdef CONFIG_X86_64
	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
	.cpu_swapgs		= { 0x0f, 0x01, 0xf8 },	// swapgs
	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
# else
	.mmu_write_cr3		= { 0x0f, 0x22, 0xd8 },	// mov %eax, %cr3
	.irq_restore_fl		= { 0x50, 0x9d },	// push %eax; popf
	.cpu_iret		= { 0xcf },		// iret
# endif
};
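
/*
 * mov64 patches the 64-bit identity function: per the x86-64 calling
 * convention the argument arrives in %rdi and is returned in %rax, so
 * "mov %rdi, %rax" is all that remains once the indirect paravirt call
 * has been replaced.
 */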
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
#ifdef CONFIG_X86_64
	return PATCH(xxl, mov64, insn_buff, len);
#endif
	return 0;
}
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};
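
/*
 * Native instruction sequences for the lock ops: a native queued spinlock
 * unlock is a single byte store of 0 to the lock word, and on bare metal
 * a vCPU can never be preempted by a hypervisor, so vcpu_is_preempted()
 * collapses to "return false" (xor %eax, %eax).
 */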
static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
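
/*
 * native_patch() replaces the indirect call at a paravirt patch site with
 * the native instruction sequence from the tables above whenever one
 * exists (and, for the lock ops, only when the native implementation is
 * actually in use); everything else falls through to
 * paravirt_patch_default().
 */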
unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
			  unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

# ifdef CONFIG_X86_64
	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
	PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
# else
	PATCH_CASE(cpu, iret, xxl, insn_buff, len);
# endif
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock, insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted, insn_buff, len);
		break;
#endif
	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}
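
For context, a caller uses native_patch() roughly as follows. This is a
minimal sketch, not the kernel's actual patching loop (which lives in
arch/x86/kernel/alternative.c); pad_with_nops(), MAX_PATCH_LEN and the
site-> fields are hypothetical stand-ins:

	/* Sketch only: patch one recorded paravirt call site in place. */
	unsigned char insn_buff[MAX_PATCH_LEN];	/* size is an assumption */
	unsigned int used;

	used = native_patch(site->type, insn_buff,
			    (unsigned long)site->addr, site->len);

	/* pad_with_nops() is a hypothetical helper: fill whatever part of
	   the original call site the native sequence did not use. */
	pad_with_nops(insn_buff + used, site->len - used);
	memcpy(site->addr, insn_buff, site->len);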