x86/mm: Avoid redundant interrupt disable in load_mm_cr4()

load_mm_cr4() is always called with interrupts disabled from:

 - switch_mm_irqs_off()
 - refresh_pce(), which is an on_each_cpu() callback

Thus, disabling interrupts in cr4_set/clear_bits() is redundant.

Implement cr4_set/clear_bits_irqsoff() helpers, rename load_mm_cr4() to
load_mm_cr4_irqsoff() and use the new helpers. The new helpers do not need
a lockdep assert as __cr4_set() has one already.
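
For reference, the assert in question lives in __cr4_set(), which this
patch does not touch. A paraphrased sketch of that helper as it looked
around this commit (arch/x86/include/asm/tlbflush.h; shown for context
only):

	static inline void __cr4_set(unsigned long cr4)
	{
		/* Fires if a caller gets here with interrupts enabled */
		lockdep_assert_irqs_disabled();
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}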

The renaming, in combination with the checks in __cr4_set(), ensures that
any change in the interrupt-state assumptions at the call sites will be
detected.

[ tglx: Massaged change log ]

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/0fbbcb64-5f26-4ffb-1bb9-4f5f48426893@siemens.com
Jan Kiszka, 2019-06-18 09:32:11 +02:00, committed by Thomas Gleixner
commit 21e450d21c (parent 5f9e832c13)
4 changed files with 29 additions and 13 deletions

arch/x86/events/core.c

@@ -2087,7 +2087,7 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
+	load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
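
refresh_pce() only ever runs with interrupts disabled because it is
invoked exclusively as a cross-CPU callback. A paraphrased sketch of the
call site in x86_pmu_event_mapped() (exact details may differ in the
tree):

	/*
	 * Callbacks passed to on_each_cpu_mask() run from IPI context,
	 * i.e. with interrupts disabled on every targeted CPU.
	 */
	on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);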

arch/x86/include/asm/mmu_context.h

@@ -28,16 +28,16 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 
 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
-static inline void load_mm_cr4(struct mm_struct *mm)
+static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
 {
 	if (static_branch_unlikely(&rdpmc_always_available_key) ||
 	    atomic_read(&mm->context.perf_rdpmc_allowed))
-		cr4_set_bits(X86_CR4_PCE);
+		cr4_set_bits_irqsoff(X86_CR4_PCE);
 	else
-		cr4_clear_bits(X86_CR4_PCE);
+		cr4_clear_bits_irqsoff(X86_CR4_PCE);
 }
 #else
-static inline void load_mm_cr4(struct mm_struct *mm) {}
+static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
 #endif
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL

arch/x86/include/asm/tlbflush.h

@@ -290,26 +290,42 @@ static inline void __cr4_set(unsigned long cr4)
 }
 
 /* Set in this cpu's CR4. */
-static inline void cr4_set_bits(unsigned long mask)
+static inline void cr4_set_bits_irqsoff(unsigned long mask)
 {
-	unsigned long cr4, flags;
+	unsigned long cr4;
 
-	local_irq_save(flags);
 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
 	if ((cr4 | mask) != cr4)
 		__cr4_set(cr4 | mask);
+}
+
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits_irqsoff(unsigned long mask)
+{
+	unsigned long cr4;
+
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	if ((cr4 & ~mask) != cr4)
+		__cr4_set(cr4 & ~mask);
+}
+
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits(unsigned long mask)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	cr4_set_bits_irqsoff(mask);
 	local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
-	unsigned long cr4, flags;
+	unsigned long flags;
 
 	local_irq_save(flags);
-	cr4 = this_cpu_read(cpu_tlbstate.cr4);
-	if ((cr4 & ~mask) != cr4)
-		__cr4_set(cr4 & ~mask);
+	cr4_clear_bits_irqsoff(mask);
 	local_irq_restore(flags);
 }
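
The split gives callers two entry points: code that already runs with
interrupts disabled uses the _irqsoff variants directly, while everything
else goes through the wrappers, which save and restore the flags. A
hypothetical illustration (both example functions are invented, not from
the tree):

	/* Hypothetical caller in IPI context: interrupts are already off */
	static void example_irq_context(void)
	{
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	}

	/* Hypothetical caller in arbitrary context: the wrapper handles IRQs */
	static void example_any_context(void)
	{
		cr4_set_bits(X86_CR4_PCE);
	}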

arch/x86/mm/tlb.c

@@ -440,7 +440,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 
 	if (next != real_prev) {
-		load_mm_cr4(next);
+		load_mm_cr4_irqsoff(next);
 		switch_ldt(real_prev, next);
 	}
 }
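
switch_mm_irqs_off() states its interrupt-state requirement in its name,
so load_mm_cr4_irqsoff() fits naturally here. Should a future caller
invoke one of the _irqsoff helpers with interrupts enabled, the assert in
__cr4_set() fires under lockdep. A hypothetical misuse, for illustration
only:

	/*
	 * Hypothetical bug: with interrupts still enabled here, lockdep
	 * catches this via lockdep_assert_irqs_disabled() in __cr4_set().
	 */
	static void bad_caller(struct mm_struct *mm)
	{
		load_mm_cr4_irqsoff(mm);
	}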